diff --git a/Makefile b/Makefile index 2ba562cc7..768951a9f 100644 --- a/Makefile +++ b/Makefile @@ -12,6 +12,7 @@ endif REPO = github.com/operator-framework/api BUILD_PATH = $(REPO)/cmd/operator-verify PKGS = $(shell go list ./... | grep -v /vendor/) +YQ_INTERNAL := $(Q) go run $(MOD_FLAGS) ./vendor/github.com/mikefarah/yq/v2/ .PHONY: help help: ## Show this help screen @@ -23,7 +24,7 @@ help: ## Show this help screen .PHONY: install -install: ## Build & install the operator-verify +install: ## Build & install operator-verify $(Q)go install \ -gcflags "all=-trimpath=${GOPATH}" \ @@ -35,7 +36,7 @@ install: ## Build & install the operator-verify $(BUILD_PATH) # Code management. -.PHONY: format tidy clean +.PHONY: format tidy clean vendor generate format: ## Format the source code $(Q)go fmt $(PKGS) @@ -43,14 +44,27 @@ format: ## Format the source code tidy: ## Update dependencies $(Q)go mod tidy -v +vendor: tidy ## Update vendor directory + $(Q)go mod vendor + clean: ## Clean up the build artifacts $(Q)rm -rf build -############################## -# Tests # -############################## +generate: controller-gen ## Generate code + $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./... + +manifests: controller-gen ## Generate manifests e.g. CRD, RBAC etc + @# Handle >v1 APIs + $(CONTROLLER_GEN) crd paths=./pkg/operators/v2alpha1... output:crd:dir=./crds + + @# Handle <=v1 APIs + $(CONTROLLER_GEN) schemapatch:manifests=./crds output:dir=./crds paths=./pkg/operators/... + + @# Preserve unknown fields on the CSV spec (prevents install strategy from being pruned) + $(YQ_INTERNAL) w --inplace ./crds/operators.coreos.com_clusterserviceversions.yaml spec.validation.openAPIV3Schema.properties.spec.properties.install.properties.spec.properties.deployments.items.properties.spec.properties.template.properties.metadata.x-kubernetes-preserve-unknown-fields true -##@ Tests + @# Update embedded CRD files. + @go generate ./crds/... # Static tests. .PHONY: test test-unit @@ -59,4 +73,11 @@ test: test-unit ## Run the tests TEST_PKGS:=$(shell go list ./...) test-unit: ## Run the unit tests - $(Q)go test -short ${TEST_PKGS} + $(Q)go test -count=1 -short ${TEST_PKGS} + +# Utilities. +.PHONY: controller-gen + +controller-gen: vendor ## Find or download controller-gen +CONTROLLER_GEN=$(Q)go run -mod=vendor ./vendor/sigs.k8s.io/controller-tools/cmd/controller-gen + diff --git a/crds/defs.go b/crds/defs.go new file mode 100644 index 000000000..abc56ceda --- /dev/null +++ b/crds/defs.go @@ -0,0 +1,117 @@ +package crds + +// Generate embedded files from CRDs to avoid file path changes when this package is imported. +//go:generate go run github.com/go-bindata/go-bindata/v3/go-bindata -pkg crds -o zz_defs.go -ignore=.*\.go . + +import ( + "bytes" + "fmt" + "sync" + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/yaml" +) + +// crdFile is a descriptor of a file containing a CustomResourceDefinition. +type crdFile string + +// path returns the path of the file. +func (c crdFile) path() string { + s := string(c) + return s +} + +// mustUnmarshal unmarshals the file into a CRD and panics on failure. 
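+// The decode path is YAML -> Unstructured -> internal apiextensions type -> v1;
+// stepping through the internal version allows manifests authored for
+// apiextensions.k8s.io/v1beta1 to be returned to callers as v1 objects.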
+func (c crdFile) mustUnmarshal() *apiextensionsv1.CustomResourceDefinition {
+	path := c.path()
+	data, err := Asset(path)
+	if err != nil {
+		panic(fmt.Errorf("unable to read crd file %s: %s", path, err))
+	}
+
+	u := &unstructured.Unstructured{}
+	reader := bytes.NewReader(data)
+	decoder := yaml.NewYAMLOrJSONDecoder(reader, 30)
+	if err = decoder.Decode(u); err != nil {
+		panic(fmt.Errorf("crd unmarshaling failed: %s", err))
+	}
+
+	// Step through the unversioned type to support v1beta1 -> v1
+	unversioned := &apiextensions.CustomResourceDefinition{}
+	if err = scheme.Convert(u, unversioned, nil); err != nil {
+		panic(fmt.Errorf("failed to convert crd: %s\nto internal type: %s", u, err))
+	}
+
+	crd := &apiextensionsv1.CustomResourceDefinition{}
+	if err = scheme.Convert(unversioned, crd, nil); err != nil {
+		panic(fmt.Errorf("failed to convert crd: %s\nto v1: %s", u, err))
+	}
+
+	return crd
+}
+
+var (
+	lock sync.Mutex
+
+	// loaded stores previously unmarshaled CustomResourceDefinitions indexed by their file descriptor.
+	loaded = map[crdFile]*apiextensionsv1.CustomResourceDefinition{}
+	// scheme provides conversions between type versions.
+	scheme = runtime.NewScheme()
+)
+
+func init() {
+	// Add conversions between CRD versions
+	install.Install(scheme)
+}
+
+// getCRD lazily loads and returns the CustomResourceDefinition unmarshaled from a file.
+func getCRD(file crdFile) *apiextensionsv1.CustomResourceDefinition {
+	lock.Lock()
+	defer lock.Unlock()
+
+	if crd, ok := loaded[file]; ok && crd != nil {
+		return crd
+	}
+
+	// Unmarshal and memoize
+	crd := file.mustUnmarshal()
+	loaded[file] = crd
+
+	return crd
+}
+
+// TODO(njhale): codegen this.
+
+// CatalogSource returns a copy of the CustomResourceDefinition for the latest version of the CatalogSource API.
+func CatalogSource() *apiextensionsv1.CustomResourceDefinition {
+	return getCRD("operators.coreos.com_catalogsources.yaml").DeepCopy()
+}
+
+// ClusterServiceVersion returns a copy of the CustomResourceDefinition for the latest version of the ClusterServiceVersion API.
+func ClusterServiceVersion() *apiextensionsv1.CustomResourceDefinition {
+	return getCRD("operators.coreos.com_clusterserviceversions.yaml").DeepCopy()
+}
+
+// InstallPlan returns a copy of the CustomResourceDefinition for the latest version of the InstallPlan API.
+func InstallPlan() *apiextensionsv1.CustomResourceDefinition {
+	return getCRD("operators.coreos.com_installplans.yaml").DeepCopy()
+}
+
+// OperatorGroup returns a copy of the CustomResourceDefinition for the latest version of the OperatorGroup API.
+func OperatorGroup() *apiextensionsv1.CustomResourceDefinition {
+	return getCRD("operators.coreos.com_operatorgroups.yaml").DeepCopy()
+}
+
+// Operator returns a copy of the CustomResourceDefinition for the latest version of the Operator API.
+func Operator() *apiextensionsv1.CustomResourceDefinition {
+	return getCRD("operators.coreos.com_operators.yaml").DeepCopy()
+}
+
+// Subscription returns a copy of the CustomResourceDefinition for the latest version of the Subscription API.
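+// As with the getters above, the result is a deep copy of the cached
+// definition, so callers may mutate it freely. A minimal, hypothetical use:
+//
+//	crd := crds.Subscription()
+//	fmt.Println(crd.Spec.Names.Kind) // prints "Subscription"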
+func Subscription() *apiextensionsv1.CustomResourceDefinition { + return getCRD("operators.coreos.com_subscriptions.yaml").DeepCopy() +} diff --git a/crds/defs_test.go b/crds/defs_test.go new file mode 100644 index 000000000..53e274ece --- /dev/null +++ b/crds/defs_test.go @@ -0,0 +1,56 @@ +package crds + +import ( + "reflect" + "testing" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +var emptyCRD = &apiextensionsv1.CustomResourceDefinition{} + +func TestGetters(t *testing.T) { + tests := []struct { + description string + get func() *apiextensionsv1.CustomResourceDefinition + }{ + { + description: "CatalogSource", + get: CatalogSource, + }, + { + description: "ClusterServiceVersion", + get: ClusterServiceVersion, + }, + { + description: "InstallPlan", + get: InstallPlan, + }, + { + description: "OperatorGroup", + get: OperatorGroup, + }, + { + description: "Operator", + get: Operator, + }, + { + description: "Subscription", + get: Subscription, + }, + } + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + defer func() { + if x := recover(); x != nil { + t.Errorf("panic loading crd: %v", x) + } + }() + + crd := tt.get() + if crd == nil || reflect.DeepEqual(crd, emptyCRD) { + t.Error("loaded CustomResourceDefinition is empty") + } + }) + } +} diff --git a/crds/doc.go b/crds/doc.go new file mode 100644 index 000000000..57085fc11 --- /dev/null +++ b/crds/doc.go @@ -0,0 +1,2 @@ +// Package crds contains CustomResourceDefinition manifests for operator-framework APIs. +package crds diff --git a/crds/operators.coreos.com_catalogsources.yaml b/crds/operators.coreos.com_catalogsources.yaml new file mode 100644 index 000000000..cccf26cb6 --- /dev/null +++ b/crds/operators.coreos.com_catalogsources.yaml @@ -0,0 +1,190 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: catalogsources.operators.coreos.com + annotations: + displayName: CatalogSource + description: A source configured to find packages and updates. +spec: + group: operators.coreos.com + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true + preserveUnknownFields: false + scope: Namespaced + names: + plural: catalogsources + singular: catalogsource + kind: CatalogSource + listKind: CatalogSourceList + shortNames: + - catsrc + categories: + - olm + additionalPrinterColumns: + - name: Display + type: string + description: The pretty name of the catalog + JSONPath: .spec.displayName + - name: Type + type: string + description: The type of the catalog + JSONPath: .spec.sourceType + - name: Publisher + type: string + description: The publisher of the catalog + JSONPath: .spec.publisher + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + subresources: + # status enables the status subresource. + status: {} + validation: + openAPIV3Schema: + description: CatalogSource is a repository of CSVs, CRDs, and operator packages. + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + required: + - sourceType + properties: + address: + description: 'Address is a host that OLM can use to connect to a pre-existing + registry. Format: : Only used when SourceType + = SourceTypeGrpc. Ignored when the Image field is set.' + type: string + configMap: + description: ConfigMap is the name of the ConfigMap to be used to back + a configmap-server registry. Only used when SourceType = SourceTypeConfigmap + or SourceTypeInternal. + type: string + description: + type: string + displayName: + description: Metadata + type: string + icon: + type: object + required: + - base64data + - mediatype + properties: + base64data: + type: string + mediatype: + type: string + image: + description: Image is an operator-registry container image to instantiate + a registry-server with. Only used when SourceType = SourceTypeGrpc. + If present, the address field is ignored. + type: string + publisher: + type: string + secrets: + description: Secrets represent set of secrets that can be used to access + the contents of the catalog. It is best to keep this list small, since + each will need to be tried for every catalog entry. + type: array + items: + type: string + sourceType: + description: SourceType is the type of source + type: string + updateStrategy: + description: UpdateStrategy defines how updated catalog source images + can be discovered Consists of an interval that defines polling duration + and an embedded strategy type + type: object + properties: + registryPoll: + type: object + properties: + interval: + description: Interval is used to determine the time interval + between checks of the latest catalog source version. The catalog + operator polls to see if a new version of the catalog source + is available. If available, the latest image is pulled and + gRPC traffic is directed to the latest catalog source. + type: string + status: + type: object + properties: + configMapReference: + type: object + required: + - name + - namespace + properties: + lastUpdateTime: + type: string + format: date-time + name: + type: string + namespace: + type: string + resourceVersion: + type: string + uid: + description: UID is a type that holds unique ID values, including + UUIDs. Because we don't ONLY use UUIDs, this is an alias to string. Being + a type captures intent and helps make sure that UIDs and names + do not get conflated. + type: string + connectionState: + type: object + required: + - lastObservedState + properties: + address: + type: string + lastConnect: + type: string + format: date-time + lastObservedState: + type: string + latestImageRegistryPoll: + description: The last time the CatalogSource image registry has been + polled to ensure the image is up-to-date + type: string + format: date-time + message: + description: A human readable message indicating details about why the + CatalogSource is in this condition. + type: string + reason: + description: Reason is the reason the CatalogSource was transitioned + to its current state. 
+ type: string + registryService: + type: object + properties: + createdAt: + type: string + format: date-time + port: + type: string + protocol: + type: string + serviceName: + type: string + serviceNamespace: + type: string diff --git a/crds/operators.coreos.com_clusterserviceversions.yaml b/crds/operators.coreos.com_clusterserviceversions.yaml new file mode 100644 index 000000000..935124797 --- /dev/null +++ b/crds/operators.coreos.com_clusterserviceversions.yaml @@ -0,0 +1,8491 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterserviceversions.operators.coreos.com + annotations: + displayName: Operator Version + description: Represents an Operator that should be running on the cluster, including + requirements and install strategy. +spec: + names: + plural: clusterserviceversions + singular: clusterserviceversion + kind: ClusterServiceVersion + listKind: ClusterServiceVersionList + shortNames: + - csv + - csvs + categories: + - olm + additionalPrinterColumns: + - name: Display + type: string + description: The name of the CSV + JSONPath: .spec.displayName + - name: Version + type: string + description: The version of the CSV + JSONPath: .spec.version + - name: Replaces + type: string + description: The name of a CSV that this one replaces + JSONPath: .spec.replaces + - name: Phase + type: string + JSONPath: .status.phase + group: operators.coreos.com + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true + scope: Namespaced + preserveUnknownFields: false + subresources: + status: {} + validation: + openAPIV3Schema: + description: ClusterServiceVersion is a Custom Resource of type `ClusterServiceVersionSpec`. + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ClusterServiceVersionSpec declarations tell OLM how to install + an operator that can manage apps for a given version. + type: object + required: + - displayName + - install + properties: + annotations: + description: Annotations is an unstructured key value map stored with + a resource that may be set by external tools to store and retrieve + arbitrary metadata. + type: object + additionalProperties: + type: string + apiservicedefinitions: + description: APIServiceDefinitions declares all of the extension apis + managed or required by an operator being ran by ClusterServiceVersion. 
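+              # A hypothetical owned APIService declaration, for illustration only:
+              #   apiservicedefinitions:
+              #     owned:
+              #     - group: packages.example.com
+              #       version: v1
+              #       kind: PackageManifest
+              #       name: packagemanifests.packages.example.com
+              #       deploymentName: example-server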
+ type: object + properties: + owned: + type: array + items: + description: APIServiceDescription provides details to OLM about + apis provided via aggregation + type: object + required: + - group + - kind + - name + - version + properties: + actionDescriptors: + type: array + items: + description: ActionDescriptor describes a declarative action + that can be performed on a custom resource instance + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. + type: string + format: byte + x-descriptors: + type: array + items: + type: string + containerPort: + type: integer + format: int32 + deploymentName: + type: string + description: + type: string + displayName: + type: string + group: + type: string + kind: + type: string + name: + type: string + resources: + type: array + items: + description: APIResourceReference is a Kubernetes resource + type used by a custom resource + type: object + required: + - kind + - name + - version + properties: + kind: + type: string + name: + type: string + version: + type: string + specDescriptors: + type: array + items: + description: SpecDescriptor describes a field in a spec + block of a CRD so that OLM can consume it + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. + type: string + format: byte + x-descriptors: + type: array + items: + type: string + statusDescriptors: + type: array + items: + description: StatusDescriptor describes a field in a status + block of a CRD so that OLM can consume it + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. + type: string + format: byte + x-descriptors: + type: array + items: + type: string + version: + type: string + required: + type: array + items: + description: APIServiceDescription provides details to OLM about + apis provided via aggregation + type: object + required: + - group + - kind + - name + - version + properties: + actionDescriptors: + type: array + items: + description: ActionDescriptor describes a declarative action + that can be performed on a custom resource instance + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. 
+ type: string + format: byte + x-descriptors: + type: array + items: + type: string + containerPort: + type: integer + format: int32 + deploymentName: + type: string + description: + type: string + displayName: + type: string + group: + type: string + kind: + type: string + name: + type: string + resources: + type: array + items: + description: APIResourceReference is a Kubernetes resource + type used by a custom resource + type: object + required: + - kind + - name + - version + properties: + kind: + type: string + name: + type: string + version: + type: string + specDescriptors: + type: array + items: + description: SpecDescriptor describes a field in a spec + block of a CRD so that OLM can consume it + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. + type: string + format: byte + x-descriptors: + type: array + items: + type: string + statusDescriptors: + type: array + items: + description: StatusDescriptor describes a field in a status + block of a CRD so that OLM can consume it + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. + type: string + format: byte + x-descriptors: + type: array + items: + type: string + version: + type: string + customresourcedefinitions: + description: "CustomResourceDefinitions declares all of the CRDs managed + or required by an operator being ran by ClusterServiceVersion. \n + If the CRD is present in the Owned list, it is implicitly required." + type: object + properties: + owned: + type: array + items: + description: CRDDescription provides details to OLM about the + CRDs + type: object + required: + - kind + - name + - version + properties: + actionDescriptors: + type: array + items: + description: ActionDescriptor describes a declarative action + that can be performed on a custom resource instance + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. + type: string + format: byte + x-descriptors: + type: array + items: + type: string + description: + type: string + displayName: + type: string + kind: + type: string + name: + type: string + resources: + type: array + items: + description: APIResourceReference is a Kubernetes resource + type used by a custom resource + type: object + required: + - kind + - name + - version + properties: + kind: + type: string + name: + type: string + version: + type: string + specDescriptors: + type: array + items: + description: SpecDescriptor describes a field in a spec + block of a CRD so that OLM can consume it + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. 
+ type: string + format: byte + x-descriptors: + type: array + items: + type: string + statusDescriptors: + type: array + items: + description: StatusDescriptor describes a field in a status + block of a CRD so that OLM can consume it + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. + type: string + format: byte + x-descriptors: + type: array + items: + type: string + version: + type: string + required: + type: array + items: + description: CRDDescription provides details to OLM about the + CRDs + type: object + required: + - kind + - name + - version + properties: + actionDescriptors: + type: array + items: + description: ActionDescriptor describes a declarative action + that can be performed on a custom resource instance + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. + type: string + format: byte + x-descriptors: + type: array + items: + type: string + description: + type: string + displayName: + type: string + kind: + type: string + name: + type: string + resources: + type: array + items: + description: APIResourceReference is a Kubernetes resource + type used by a custom resource + type: object + required: + - kind + - name + - version + properties: + kind: + type: string + name: + type: string + version: + type: string + specDescriptors: + type: array + items: + description: SpecDescriptor describes a field in a spec + block of a CRD so that OLM can consume it + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. + type: string + format: byte + x-descriptors: + type: array + items: + type: string + statusDescriptors: + type: array + items: + description: StatusDescriptor describes a field in a status + block of a CRD so that OLM can consume it + type: object + required: + - path + properties: + description: + type: string + displayName: + type: string + path: + type: string + value: + description: RawMessage is a raw encoded JSON value. + It implements Marshaler and Unmarshaler and can be + used to delay JSON decoding or precompute a JSON encoding. + type: string + format: byte + x-descriptors: + type: array + items: + type: string + version: + type: string + description: + type: string + displayName: + type: string + icon: + type: array + items: + type: object + required: + - base64data + - mediatype + properties: + base64data: + type: string + mediatype: + type: string + install: + description: NamedInstallStrategy represents the block of an ClusterServiceVersion + resource where the install strategy is specified. + type: object + required: + - strategy + properties: + spec: + description: StrategyDetailsDeployment represents the parsed details + of a Deployment InstallStrategy. 
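+            # For illustration, a typical CSV install block (names hypothetical):
+            #   install:
+            #     strategy: deployment
+            #     spec:
+            #       deployments:
+            #       - name: example-operator
+            #         spec: {}   # a standard apps/v1 DeploymentSpec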
+ type: object + required: + - deployments + properties: + clusterPermissions: + type: array + items: + description: StrategyDeploymentPermissions describe the rbac + rules and service account needed by the install strategy + type: object + required: + - rules + - serviceAccountName + properties: + rules: + type: array + items: + description: PolicyRule holds information that describes + a policy rule, but does not contain information about + who the rule applies to or which namespace the rule + applies to. + type: object + required: + - verbs + properties: + apiGroups: + description: APIGroups is the name of the APIGroup + that contains the resources. If multiple API + groups are specified, any action requested against + one of the enumerated resources in any API group + will be allowed. + type: array + items: + type: string + nonResourceURLs: + description: NonResourceURLs is a set of partial + urls that a user should have access to. *s are + allowed, but only as the full, final step in the + path Since non-resource URLs are not namespaced, + this field is only applicable for ClusterRoles + referenced from a ClusterRoleBinding. Rules can + either apply to API resources (such as "pods" + or "secrets") or non-resource URL paths (such + as "/api"), but not both. + type: array + items: + type: string + resourceNames: + description: ResourceNames is an optional white + list of names that the rule applies to. An empty + set means that everything is allowed. + type: array + items: + type: string + resources: + description: Resources is a list of resources this + rule applies to. ResourceAll represents all resources. + type: array + items: + type: string + verbs: + description: Verbs is a list of Verbs that apply + to ALL the ResourceKinds and AttributeRestrictions + contained in this rule. VerbAll represents all + kinds. + type: array + items: + type: string + serviceAccountName: + type: string + deployments: + type: array + items: + description: StrategyDeploymentSpec contains the name and + spec for the deployment ALM should create + type: object + required: + - name + - spec + properties: + name: + type: string + spec: + description: DeploymentSpec is the specification of the + desired behavior of the Deployment. + type: object + required: + - selector + - template + properties: + minReadySeconds: + description: Minimum number of seconds for which a + newly created pod should be ready without any of + its container crashing, for it to be considered + available. Defaults to 0 (pod will be considered + available as soon as it is ready) + type: integer + format: int32 + paused: + description: Indicates that the deployment is paused. + type: boolean + progressDeadlineSeconds: + description: The maximum time in seconds for a deployment + to make progress before it is considered to be failed. + The deployment controller will continue to process + failed deployments and a condition with a ProgressDeadlineExceeded + reason will be surfaced in the deployment status. + Note that progress will not be estimated during + the time a deployment is paused. Defaults to 600s. + type: integer + format: int32 + replicas: + description: Number of desired pods. This is a pointer + to distinguish between explicit zero and not specified. + Defaults to 1. + type: integer + format: int32 + revisionHistoryLimit: + description: The number of old ReplicaSets to retain + to allow rollback. This is a pointer to distinguish + between explicit zero and not specified. Defaults + to 10. 
+ type: integer + format: int32 + selector: + description: Label selector for pods. Existing ReplicaSets + whose pods are selected by this will be the ones + affected by this deployment. It must match the pod + template's labels. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label + selector requirements. The requirements are + ANDed. + type: array + items: + description: A label selector requirement is + a selector that contains values, a key, and + an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the + selector applies to. + type: string + operator: + description: operator represents a key's + relationship to a set of values. Valid + operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string + values. If the operator is In or NotIn, + the values array must be non-empty. If + the operator is Exists or DoesNotExist, + the values array must be empty. This array + is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} + pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, + whose key field is "key", the operator is "In", + and the values array contains only "value". + The requirements are ANDed. + type: object + additionalProperties: + type: string + strategy: + description: The deployment strategy to use to replace + existing pods with new ones. + type: object + properties: + rollingUpdate: + description: 'Rolling update config params. Present + only if DeploymentStrategyType = RollingUpdate. + --- TODO: Update this to follow our convention + for oneOf, whatever we decide it to be.' + type: object + properties: + maxSurge: + description: 'The maximum number of pods that + can be scheduled above the desired number + of pods. Value can be an absolute number + (ex: 5) or a percentage of desired pods + (ex: 10%). This can not be 0 if MaxUnavailable + is 0. Absolute number is calculated from + percentage by rounding up. Defaults to 25%. + Example: when this is set to 30%, the new + ReplicaSet can be scaled up immediately + when the rolling update starts, such that + the total number of old and new pods do + not exceed 130% of desired pods. Once old + pods have been killed, new ReplicaSet can + be scaled up further, ensuring that total + number of pods running at any time during + the update is at most 130% of desired pods.' + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + maxUnavailable: + description: 'The maximum number of pods that + can be unavailable during the update. Value + can be an absolute number (ex: 5) or a percentage + of desired pods (ex: 10%). Absolute number + is calculated from percentage by rounding + down. This can not be 0 if MaxSurge is 0. + Defaults to 25%. Example: when this is set + to 30%, the old ReplicaSet can be scaled + down to 70% of desired pods immediately + when the rolling update starts. Once new + pods are ready, old ReplicaSet can be scaled + down further, followed by scaling up the + new ReplicaSet, ensuring that the total + number of pods available at all times during + the update is at least 70% of desired pods.' + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + type: + description: Type of deployment. Can be "Recreate" + or "RollingUpdate". 
Default is RollingUpdate. + type: string + template: + description: Template describes the pods that will + be created. + type: object + properties: + metadata: + description: 'Standard object''s metadata. More + info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + x-kubernetes-preserve-unknown-fields: true + spec: + description: 'Specification of the desired behavior + of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + type: object + required: + - containers + properties: + activeDeadlineSeconds: + description: Optional duration in seconds + the pod may be active on the node relative + to StartTime before the system will actively + try to mark it failed and kill associated + containers. Value must be a positive integer. + type: integer + format: int64 + affinity: + description: If specified, the pod's scheduling + constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling + rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" + to the sum if the node matches the + corresponding matchExpressions; + the node(s) with the highest sum + are the most preferred. + type: array + items: + description: An empty preferred + scheduling term matches all objects + with implicit weight 0 (i.e. it's + a no-op). A null preferred scheduling + term matches no objects (i.e. + is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector + term, associated with the + corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node + selector requirements + by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchFields: + description: A list of node + selector requirements + by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + weight: + description: Weight associated + with matching the corresponding + nodeSelectorTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not + met at scheduling time, the pod + will not be scheduled onto the node. + If the affinity requirements specified + by this field cease to be met at + some point during pod execution + (e.g. due to an update), the system + may or may not try to eventually + evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list + of node selector terms. The + terms are ORed. + type: array + items: + description: A null or empty + node selector term matches + no objects. The requirements + of them are ANDed. The TopologySelectorTerm + type implements a subset of + the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node + selector requirements + by node's labels. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchFields: + description: A list of node + selector requirements + by node's fields. + type: array + items: + description: A node selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: The label + key that the selector + applies to. + type: string + operator: + description: Represents + a key's relationship + to a set of values. + Valid operators + are In, NotIn, Exists, + DoesNotExist. Gt, + and Lt. + type: string + values: + description: An array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. 
If + the operator is + Gt or Lt, the values + array must have + a single element, + which will be interpreted + as an integer. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + podAffinity: + description: Describes pod affinity scheduling + rules (e.g. co-locate this pod in the + same node, zone, etc. as some other + pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + affinity expressions, etc.), compute + a sum by iterating through the elements + of this field and adding "weight" + to the sum if the node has pods + which matches the corresponding + podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all + of the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod + affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query + over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label + selector requirements. + The requirements are + ANDed. + type: array + items: + description: A label + selector requirement + is a selector that + contains values, + a key, and an operator + that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key + is the label + key that the + selector applies + to. + type: string + operator: + description: operator + represents a + key's relationship + to a set of + values. Valid + operators are + In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or + DoesNotExist, + the values array + must be empty. + This array is + replaced during + a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels + map is equivalent + to an element of matchExpressions, + whose key field is + "key", the operator + is "In", and the values + array contains only + "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces + specifies which namespaces + the labelSelector applies + to (matches against); + null or empty list means + "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) + or not co-located (anti-affinity) + with the pods matching + the labelSelector in the + specified namespaces, + where co-located is defined + as running on a node whose + value of the label with + key topologyKey matches + that of any node on which + any of the selected pods + is running. 
Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated + with matching the corresponding + podAffinityTerm, in the range + 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements + specified by this field are not + met at scheduling time, the pod + will not be scheduled onto the node. + If the affinity requirements specified + by this field cease to be met at + some point during pod execution + (e.g. due to a pod label update), + the system may or may not try to + eventually evict the pod from its + node. When there are multiple elements, + the lists of nodes corresponding + to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity + scheduling rules (e.g. avoid putting + this pod in the same node, zone, etc. + as some other pod(s)). 
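+                          # e.g. to spread replicas across nodes (sketch, labels hypothetical):
+                          #   podAntiAffinity:
+                          #     requiredDuringSchedulingIgnoredDuringExecution:
+                          #     - topologyKey: kubernetes.io/hostname
+                          #       labelSelector:
+                          #         matchLabels: {app: example}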
+ type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer + to schedule pods to nodes that satisfy + the anti-affinity expressions specified + by this field, but it may choose + a node that violates one or more + of the expressions. The node that + is most preferred is the one with + the greatest sum of weights, i.e. + for each node that meets all of + the scheduling requirements (resource + request, requiredDuringScheduling + anti-affinity expressions, etc.), + compute a sum by iterating through + the elements of this field and adding + "weight" to the sum if the node + has pods which matches the corresponding + podAffinityTerm; the node(s) with + the highest sum are the most preferred. + type: array + items: + description: The weights of all + of the matched WeightedPodAffinityTerm + fields are added per-node to find + the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod + affinity term, associated + with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query + over a set of resources, + in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label + selector requirements. + The requirements are + ANDed. + type: array + items: + description: A label + selector requirement + is a selector that + contains values, + a key, and an operator + that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key + is the label + key that the + selector applies + to. + type: string + operator: + description: operator + represents a + key's relationship + to a set of + values. Valid + operators are + In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array + of string values. + If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or + DoesNotExist, + the values array + must be empty. + This array is + replaced during + a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels + map is equivalent + to an element of matchExpressions, + whose key field is + "key", the operator + is "In", and the values + array contains only + "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces + specifies which namespaces + the labelSelector applies + to (matches against); + null or empty list means + "this pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) + or not co-located (anti-affinity) + with the pods matching + the labelSelector in the + specified namespaces, + where co-located is defined + as running on a node whose + value of the label with + key topologyKey matches + that of any node on which + any of the selected pods + is running. Empty topologyKey + is not allowed. + type: string + weight: + description: weight associated + with matching the corresponding + podAffinityTerm, in the range + 1-100. 
+ type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity + requirements specified by this field + are not met at scheduling time, + the pod will not be scheduled onto + the node. If the anti-affinity requirements + specified by this field cease to + be met at some point during pod + execution (e.g. due to a pod label + update), the system may or may not + try to eventually evict the pod + from its node. When there are multiple + elements, the lists of nodes corresponding + to each podAffinityTerm are intersected, + i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods + (namely those matching the labelSelector + relative to the given namespace(s)) + that this pod should be co-located + (affinity) or not co-located (anti-affinity) + with, where co-located is defined + as running on a node whose value + of the label with key + matches that of any node on which + a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over + a set of resources, in this + case pods. + type: object + properties: + matchExpressions: + description: matchExpressions + is a list of label selector + requirements. The requirements + are ANDed. + type: array + items: + description: A label selector + requirement is a selector + that contains values, + a key, and an operator + that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is + the label key that + the selector applies + to. + type: string + operator: + description: operator + represents a key's + relationship to + a set of values. + Valid operators + are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values + is an array of string + values. If the operator + is In or NotIn, + the values array + must be non-empty. + If the operator + is Exists or DoesNotExist, + the values array + must be empty. This + array is replaced + during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels + is a map of {key,value} + pairs. A single {key,value} + in the matchLabels map + is equivalent to an element + of matchExpressions, whose + key field is "key", the + operator is "In", and + the values array contains + only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + namespaces: + description: namespaces specifies + which namespaces the labelSelector + applies to (matches against); + null or empty list means "this + pod's namespace" + type: array + items: + type: string + topologyKey: + description: This pod should + be co-located (affinity) or + not co-located (anti-affinity) + with the pods matching the + labelSelector in the specified + namespaces, where co-located + is defined as running on a + node whose value of the label + with key topologyKey matches + that of any node on which + any of the selected pods is + running. Empty topologyKey + is not allowed. + type: string + automountServiceAccountToken: + description: AutomountServiceAccountToken + indicates whether a service account token + should be automatically mounted. + type: boolean + containers: + description: List of containers belonging + to the pod. Containers cannot currently + be added or removed. There must be at least + one container in a Pod. Cannot be updated. 
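+                            # A minimal container entry, for illustration (image hypothetical):
+                            #   containers:
+                            #   - name: manager
+                            #     image: quay.io/example/operator:v0.0.1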
+ type: array + items: + description: A single application container + that you want to run within a pod. + type: object + required: + - name + properties: + args: + description: 'Arguments to the entrypoint. + The docker image''s CMD is used if + this is not provided. Variable references + $(VAR_NAME) are expanded using the + container''s environment. If a variable + cannot be resolved, the reference + in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped + with a double $$, ie: $$(VAR_NAME). + Escaped references will never be expanded, + regardless of whether the variable + exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + type: array + items: + type: string + command: + description: 'Entrypoint array. Not + executed within a shell. The docker + image''s ENTRYPOINT is used if this + is not provided. Variable references + $(VAR_NAME) are expanded using the + container''s environment. If a variable + cannot be resolved, the reference + in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped + with a double $$, ie: $$(VAR_NAME). + Escaped references will never be expanded, + regardless of whether the variable + exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + type: array + items: + type: string + env: + description: List of environment variables + to set in the container. Cannot be + updated. + type: array + items: + description: EnvVar represents an + environment variable present in + a Container. + type: object + required: + - name + properties: + name: + description: Name of the environment + variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references + $(VAR_NAME) are expanded using + the previous defined environment + variables in the container and + any service environment variables. + If a variable cannot be resolved, + the reference in the input string + will be unchanged. The $(VAR_NAME) + syntax can be escaped with a + double $$, ie: $$(VAR_NAME). + Escaped references will never + be expanded, regardless of whether + the variable exists or not. + Defaults to "".' + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be + used if value is not empty. + type: object + properties: + configMapKeyRef: + description: Selects a key + of a ConfigMap. + type: object + required: + - key + properties: + key: + description: The key to + select. + type: string + name: + description: 'Name of + the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful + fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether + the ConfigMap or its + key must be defined + type: boolean + fieldRef: + description: 'Selects a field + of the pod: supports metadata.name, + metadata.namespace, metadata.labels, + metadata.annotations, spec.nodeName, + spec.serviceAccountName, + status.hostIP, status.podIP, + status.podIPs.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of + the schema the FieldPath + is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the + field to select in the + specified API version. 
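+                                      # e.g. fieldRef with fieldPath: metadata.namespace
+                                      # (apiVersion defaults to "v1")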
+ type: string + resourceFieldRef: + description: 'Selects a resource + of the container: only resources + limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory + and requests.ephemeral-storage) + are currently supported.' + type: object + required: + - resource + properties: + containerName: + description: 'Container + name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies + the output format of + the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + secretKeyRef: + description: Selects a key + of a secret in the pod's + namespace + type: object + required: + - key + properties: + key: + description: The key of + the secret to select + from. Must be a valid + secret key. + type: string + name: + description: 'Name of + the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful + fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether + the Secret or its key + must be defined + type: boolean + envFrom: + description: List of sources to populate + environment variables in the container. + The keys defined within a source must + be a C_IDENTIFIER. All invalid keys + will be reported as an event when + the container is starting. When a + key exists in multiple sources, the + value associated with the last source + will take precedence. Values defined + by an Env with a duplicate key will + take precedence. Cannot be updated. + type: array + items: + description: EnvFromSource represents + the source of a set of ConfigMaps + type: object + properties: + configMapRef: + description: The ConfigMap to + select from + type: object + properties: + name: + description: 'Name of the + referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + optional: + description: Specify whether + the ConfigMap must be defined + type: boolean + prefix: + description: An optional identifier + to prepend to each key in the + ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select + from + type: object + properties: + name: + description: 'Name of the + referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + optional: + description: Specify whether + the Secret must be defined + type: boolean + image: + description: 'Docker image name. More + info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher + level config management to default + or override container images in workload + controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One + of Always, Never, IfNotPresent. Defaults + to Always if :latest tag is specified, + or IfNotPresent otherwise. Cannot + be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images'
+ type: string
+ lifecycle:
+ description: Actions that the management
+ system should take in response to
+ container lifecycle events. Cannot
+ be updated.
+ type: object
+ properties:
+ postStart:
+ description: 'PostStart is called
+ immediately after a container
+ is created. If the handler fails,
+ the container is terminated and
+ restarted according to its restart
+ policy. Other management of the
+ container blocks until the hook
+ completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
+ type: object
+ properties:
+ exec:
+ description: One and only one
+ of the following should be
+ specified. Exec specifies
+ the action to take.
+ type: object
+ properties:
+ command:
+ description: Command is
+ the command line to execute
+ inside the container,
+ the working directory
+ for the command is root
+ ('/') in the container's
+ filesystem. The command
+ is simply exec'd, it is
+ not run inside a shell,
+ so traditional shell instructions
+ ('|', etc) won't work.
+ To use a shell, you need
+ to explicitly call out
+ to that shell. Exit status
+ of 0 is treated as live/healthy
+ and non-zero is unhealthy.
+ type: array
+ items:
+ type: string
+ httpGet:
+ description: HTTPGet specifies
+ the http request to perform.
+ type: object
+ required:
+ - port
+ properties:
+ host:
+ description: Host name to
+ connect to, defaults to
+ the pod IP. You probably
+ want to set "Host" in
+ httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers
+ to set in the request.
+ HTTP allows repeated headers.
+ type: array
+ items:
+ description: HTTPHeader
+ describes a custom header
+ to be used in HTTP probes
+ type: object
+ required:
+ - name
+ - value
+ properties:
+ name:
+ description: The header
+ field name
+ type: string
+ value:
+ description: The header
+ field value
+ type: string
+ path:
+ description: Path to access
+ on the HTTP server.
+ type: string
+ port:
+ description: Name or number
+ of the port to access
+ on the container. Number
+ must be in the range 1
+ to 65535. Name must be
+ an IANA_SVC_NAME.
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use
+ for connecting to the
+ host. Defaults to HTTP.
+ type: string
+ tcpSocket:
+ description: 'TCPSocket specifies
+ an action involving a TCP
+ port. TCP hooks not yet supported
+ TODO: implement a realistic
+ TCP lifecycle hook'
+ type: object
+ required:
+ - port
+ properties:
+ host:
+ description: 'Optional:
+ Host name to connect to,
+ defaults to the pod IP.'
+ type: string
+ port:
+ description: Number or name
+ of the port to access
+ on the container. Number
+ must be in the range 1
+ to 65535. Name must be
+ an IANA_SVC_NAME.
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ preStop:
+ description: 'PreStop is called
+ immediately before a container
+ is terminated due to an API request
+ or management event such as liveness/startup
+ probe failure, preemption, resource
+ contention, etc. The handler is
+ not called if the container crashes
+ or exits. The reason for termination
+ is passed to the handler. The
+ Pod''s termination grace period
+ countdown begins before the PreStop
+ hook is executed. Regardless
+ of the outcome of the handler,
+ the container will eventually
+ terminate within the Pod''s termination
+ grace period.
Other management + of the container blocks until + the hook completes or until the + termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + type: object + properties: + exec: + description: One and only one + of the following should be + specified. Exec specifies + the action to take. + type: object + properties: + command: + description: Command is + the command line to execute + inside the container, + the working directory + for the command is root + ('/') in the container's + filesystem. The command + is simply exec'd, it is + not run inside a shell, + so traditional shell instructions + ('|', etc) won't work. + To use a shell, you need + to explicitly call out + to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + httpGet: + description: HTTPGet specifies + the http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to + connect to, defaults to + the pod IP. You probably + want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. + HTTP allows repeated headers. + type: array + items: + description: HTTPHeader + describes a custom header + to be used in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access + on the container. Number + must be in the range 1 + to 65535. Name must be + an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use + for connecting to the + host. Defaults to HTTP. + type: string + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP + port. TCP hooks not yet supported + TODO: implement a realistic + TCP lifecycle hook' + type: object + required: + - port + properties: + host: + description: 'Optional: + Host name to connect to, + defaults to the pod IP.' + type: string + port: + description: Number or name + of the port to access + on the container. Number + must be in the range 1 + to 65535. Name must be + an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + livenessProbe: + description: 'Periodic probe of container + liveness. Container will be restarted + if the probe fails. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: object + properties: + exec: + description: One and only one of + the following should be specified. + Exec specifies the action to take. + type: object + properties: + command: + description: Command is the + command line to execute inside + the container, the working + directory for the command is + root ('/') in the container's + filesystem. The command is + simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, + you need to explicitly call + out to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. 
+ type: array + items: + type: string + failureThreshold: + description: Minimum consecutive + failures for the probe to be considered + failed after having succeeded. + Defaults to 3. Minimum value is + 1. + type: integer + format: int32 + httpGet: + description: HTTPGet specifies the + http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to connect + to, defaults to the pod IP. + You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. HTTP + allows repeated headers. + type: array + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for + connecting to the host. Defaults + to HTTP. + type: string + initialDelaySeconds: + description: 'Number of seconds + after the container has started + before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + periodSeconds: + description: How often (in seconds) + to perform the probe. Default + to 10 seconds. Minimum value is + 1. + type: integer + format: int32 + successThreshold: + description: Minimum consecutive + successes for the probe to be + considered successful after having + failed. Defaults to 1. Must be + 1 for liveness and startup. Minimum + value is 1. + type: integer + format: int32 + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP port. + TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle + hook' + type: object + required: + - port + properties: + host: + description: 'Optional: Host + name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + timeoutSeconds: + description: 'Number of seconds + after which the probe times out. + Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + name: + description: Name of the container specified + as a DNS_LABEL. Each container in + a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose + from the container. Exposing a port + here gives the system additional information + about the network connections a container + uses, but is primarily informational. + Not specifying a port here DOES NOT + prevent that port from being exposed. + Any port which is listening on the + default "0.0.0.0" address inside a + container will be accessible from + the network. Cannot be updated. + type: array + items: + description: ContainerPort represents + a network port in a single container. 
+ type: object + required: + - containerPort + properties: + containerPort: + description: Number of port to + expose on the pod's IP address. + This must be a valid port number, + 0 < x < 65536. + type: integer + format: int32 + hostIP: + description: What host IP to bind + the external port to. + type: string + hostPort: + description: Number of port to + expose on the host. If specified, + this must be a valid port number, + 0 < x < 65536. If HostNetwork + is specified, this must match + ContainerPort. Most containers + do not need this. + type: integer + format: int32 + name: + description: If specified, this + must be an IANA_SVC_NAME and + unique within the pod. Each + named port in a pod must have + a unique name. Name for the + port that can be referred to + by services. + type: string + protocol: + description: Protocol for port. + Must be UDP, TCP, or SCTP. Defaults + to "TCP". + type: string + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container + service readiness. Container will + be removed from service endpoints + if the probe fails. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: object + properties: + exec: + description: One and only one of + the following should be specified. + Exec specifies the action to take. + type: object + properties: + command: + description: Command is the + command line to execute inside + the container, the working + directory for the command is + root ('/') in the container's + filesystem. The command is + simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, + you need to explicitly call + out to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + failureThreshold: + description: Minimum consecutive + failures for the probe to be considered + failed after having succeeded. + Defaults to 3. Minimum value is + 1. + type: integer + format: int32 + httpGet: + description: HTTPGet specifies the + http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to connect + to, defaults to the pod IP. + You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. HTTP + allows repeated headers. + type: array + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for + connecting to the host. Defaults + to HTTP. + type: string + initialDelaySeconds: + description: 'Number of seconds + after the container has started + before liveness probes are initiated. 
+ More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + periodSeconds: + description: How often (in seconds) + to perform the probe. Default + to 10 seconds. Minimum value is + 1. + type: integer + format: int32 + successThreshold: + description: Minimum consecutive + successes for the probe to be + considered successful after having + failed. Defaults to 1. Must be + 1 for liveness and startup. Minimum + value is 1. + type: integer + format: int32 + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP port. + TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle + hook' + type: object + required: + - port + properties: + host: + description: 'Optional: Host + name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + timeoutSeconds: + description: 'Number of seconds + after which the probe times out. + Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + resources: + description: 'Compute Resources required + by this container. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + properties: + limits: + description: 'Limits describes the + maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: 'Requests describes + the minimum amount of compute + resources required. If Requests + is omitted for a container, it + defaults to Limits if that is + explicitly specified, otherwise + to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + securityContext: + description: 'Security options the pod + should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + type: object + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation + controls whether a process can + gain more privileges than its + parent process. This bool directly + controls if the no_new_privs flag + will be set on the container process. + AllowPrivilegeEscalation is true + always when the container is: + 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to + add/drop when running containers. + Defaults to the default set of + capabilities granted by the container + runtime. 
+ type: object + properties: + add: + description: Added capabilities + type: array + items: + description: Capability represent + POSIX capabilities type + type: string + drop: + description: Removed capabilities + type: array + items: + description: Capability represent + POSIX capabilities type + type: string + privileged: + description: Run container in privileged + mode. Processes in privileged + containers are essentially equivalent + to root on the host. Defaults + to false. + type: boolean + procMount: + description: procMount denotes the + type of proc mount to use for + the containers. The default is + DefaultProcMount which uses the + container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType + feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container + has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the + entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If + set in both SecurityContext and + PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: integer + format: int64 + runAsNonRoot: + description: Indicates that the + container must run as a non-root + user. If true, the Kubelet will + validate the image at runtime + to ensure that it does not run + as UID 0 (root) and fail to start + the container if it does. If unset + or false, no such validation will + be performed. May also be set + in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. + type: boolean + runAsUser: + description: The UID to run the + entrypoint of the container process. + Defaults to user specified in + image metadata if unspecified. + May also be set in PodSecurityContext. If + set in both SecurityContext and + PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: integer + format: int64 + seLinuxOptions: + description: The SELinux context + to be applied to the container. + If unspecified, the container + runtime will allocate a random + SELinux context for each container. May + also be set in PodSecurityContext. If + set in both SecurityContext and + PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: object + properties: + level: + description: Level is SELinux + level label that applies to + the container. + type: string + role: + description: Role is a SELinux + role label that applies to + the container. + type: string + type: + description: Type is a SELinux + type label that applies to + the container. + type: string + user: + description: User is a SELinux + user label that applies to + the container. + type: string + windowsOptions: + description: The Windows specific + settings applied to all containers. + If unspecified, the options from + the PodSecurityContext will be + used. If set in both SecurityContext + and PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: object + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec + is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the + GMSA credential spec named + by the GMSACredentialSpecName + field. This field is alpha-level + and is only honored by servers + that enable the WindowsGMSA + feature flag. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. This field is + alpha-level and is only honored + by servers that enable the + WindowsGMSA feature flag. + type: string + runAsUserName: + description: The UserName in + Windows to run the entrypoint + of the container process. + Defaults to the user specified + in image metadata if unspecified. + May also be set in PodSecurityContext. + If set in both SecurityContext + and PodSecurityContext, the + value specified in SecurityContext + takes precedence. This field + is beta-level and may be disabled + with the WindowsRunAsUserName + feature flag. + type: string + startupProbe: + description: 'StartupProbe indicates + that the Pod has successfully initialized. + If specified, no other probes are + executed until this completes successfully. + If this probe fails, the Pod will + be restarted, just as if the livenessProbe + failed. This can be used to provide + different probe parameters at the + beginning of a Pod''s lifecycle, when + it might take a long time to load + data or warm a cache, than during + steady-state operation. This cannot + be updated. This is an alpha feature + enabled by the StartupProbe feature + flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: object + properties: + exec: + description: One and only one of + the following should be specified. + Exec specifies the action to take. + type: object + properties: + command: + description: Command is the + command line to execute inside + the container, the working + directory for the command is + root ('/') in the container's + filesystem. The command is + simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, + you need to explicitly call + out to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + failureThreshold: + description: Minimum consecutive + failures for the probe to be considered + failed after having succeeded. + Defaults to 3. Minimum value is + 1. + type: integer + format: int32 + httpGet: + description: HTTPGet specifies the + http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to connect + to, defaults to the pod IP. + You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. HTTP + allows repeated headers. + type: array + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for + connecting to the host. Defaults + to HTTP. + type: string + initialDelaySeconds: + description: 'Number of seconds + after the container has started + before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ type: integer
+ format: int32
+ periodSeconds:
+ description: How often (in seconds)
+ to perform the probe. Default
+ to 10 seconds. Minimum value is
+ 1.
+ type: integer
+ format: int32
+ successThreshold:
+ description: Minimum consecutive
+ successes for the probe to be
+ considered successful after having
+ failed. Defaults to 1. Must be
+ 1 for liveness and startup. Minimum
+ value is 1.
+ type: integer
+ format: int32
+ tcpSocket:
+ description: 'TCPSocket specifies
+ an action involving a TCP port.
+ TCP hooks not yet supported TODO:
+ implement a realistic TCP lifecycle
+ hook'
+ type: object
+ required:
+ - port
+ properties:
+ host:
+ description: 'Optional: Host
+ name to connect to, defaults
+ to the pod IP.'
+ type: string
+ port:
+ description: Number or name
+ of the port to access on the
+ container. Number must be
+ in the range 1 to 65535. Name
+ must be an IANA_SVC_NAME.
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ timeoutSeconds:
+ description: 'Number of seconds
+ after which the probe times out.
+ Defaults to 1 second. Minimum
+ value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes'
+ type: integer
+ format: int32
+ stdin:
+ description: Whether this container
+ should allocate a buffer for stdin
+ in the container runtime. If this
+ is not set, reads from stdin in the
+ container will always result in EOF.
+ Default is false.
+ type: boolean
+ stdinOnce:
+ description: Whether the container runtime
+ should close the stdin channel after
+ it has been opened by a single attach.
+ When stdin is true the stdin stream
+ will remain open across multiple attach
+ sessions. If stdinOnce is set to true,
+ stdin is opened on container start,
+ is empty until the first client attaches
+ to stdin, and then remains open and
+ accepts data until the client disconnects,
+ at which time stdin is closed and
+ remains closed until the container
+ is restarted. If this flag is false,
+ a container process that reads from
+ stdin will never receive an EOF. Default
+ is false
+ type: boolean
+ terminationMessagePath:
+ description: 'Optional: Path at which
+ the file to which the container''s
+ termination message will be written
+ is mounted into the container''s filesystem.
+ Message written is intended to be
+ brief final status, such as an assertion
+ failure message. Will be truncated
+ by the node if greater than 4096 bytes.
+ The total message length across all
+ containers will be limited to 12kb.
+ Defaults to /dev/termination-log.
+ Cannot be updated.'
+ type: string
+ terminationMessagePolicy:
+ description: Indicate how the termination
+ message should be populated. File
+ will use the contents of terminationMessagePath
+ to populate the container status message
+ on both success and failure. FallbackToLogsOnError
+ will use the last chunk of container
+ log output if the termination message
+ file is empty and the container exited
+ with an error. The log output is limited
+ to 2048 bytes or 80 lines, whichever
+ is smaller. Defaults to File. Cannot
+ be updated.
+ type: string
+ tty:
+ description: Whether this container
+ should allocate a TTY for itself,
+ also requires 'stdin' to be true.
+ Default is false.
+ type: boolean
+ volumeDevices:
+ description: volumeDevices is the list
+ of block devices to be used by the
+ container. This is a beta feature.
+ type: array + items: + description: volumeDevice describes + a mapping of a raw block device + within a container. + type: object + required: + - devicePath + - name + properties: + devicePath: + description: devicePath is the + path inside of the container + that the device will be mapped + to. + type: string + name: + description: name must match the + name of a persistentVolumeClaim + in the pod + type: string + volumeMounts: + description: Pod volumes to mount into + the container's filesystem. Cannot + be updated. + type: array + items: + description: VolumeMount describes + a mounting of a Volume within a + container. + type: object + required: + - mountPath + - name + properties: + mountPath: + description: Path within the container + at which the volume should be + mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation + determines how mounts are propagated + from the host to container and + the other way around. When not + set, MountPropagationNone is + used. This field is beta in + 1.10. + type: string + name: + description: This must match the + Name of a Volume. + type: string + readOnly: + description: Mounted read-only + if true, read-write otherwise + (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume + from which the container's volume + should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within + the volume from which the container's + volume should be mounted. Behaves + similarly to SubPath but environment + variable references $(VAR_NAME) + are expanded using the container's + environment. Defaults to "" + (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + workingDir: + description: Container's working directory. + If not specified, the container runtime's + default will be used, which might + be configured in the container image. + Cannot be updated. + type: string + dnsConfig: + description: Specifies the DNS parameters + of a pod. Parameters specified here will + be merged to the generated DNS configuration + based on DNSPolicy. + type: object + properties: + nameservers: + description: A list of DNS name server + IP addresses. This will be appended + to the base nameservers generated from + DNSPolicy. Duplicated nameservers will + be removed. + type: array + items: + type: string + options: + description: A list of DNS resolver options. + This will be merged with the base options + generated from DNSPolicy. Duplicated + entries will be removed. Resolution + options given in Options will override + those that appear in the base DNSPolicy. + type: array + items: + description: PodDNSConfigOption defines + DNS resolver options of a pod. + type: object + properties: + name: + description: Required. + type: string + value: + type: string + searches: + description: A list of DNS search domains + for host-name lookup. This will be appended + to the base search paths generated from + DNSPolicy. Duplicated search paths will + be removed. + type: array + items: + type: string + dnsPolicy: + description: Set DNS policy for the pod. Defaults + to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet', + 'ClusterFirst', 'Default' or 'None'. DNS + parameters given in DNSConfig will be merged + with the policy selected with DNSPolicy. + To have DNS options set along with hostNetwork, + you have to specify DNS policy explicitly + to 'ClusterFirstWithHostNet'. 
+ type: string
+ enableServiceLinks:
+ description: 'EnableServiceLinks indicates
+ whether information about services should
+ be injected into pod''s environment variables,
+ matching the syntax of Docker links. Optional:
+ Defaults to true.'
+ type: boolean
+ ephemeralContainers:
+ description: List of ephemeral containers
+ run in this pod. Ephemeral containers may
+ be run in an existing pod to perform user-initiated
+ actions such as debugging. This list cannot
+ be specified when creating a pod, and it
+ cannot be modified by updating the pod spec.
+ In order to add an ephemeral container to
+ an existing pod, use the pod's ephemeralcontainers
+ subresource. This field is alpha-level and
+ is only honored by servers that enable the
+ EphemeralContainers feature.
+ type: array
+ items:
+ description: An EphemeralContainer is a
+ container that may be added temporarily
+ to an existing pod for user-initiated
+ activities such as debugging. Ephemeral
+ containers have no resource or scheduling
+ guarantees, and they will not be restarted
+ when they exit or when a pod is removed
+ or restarted. If an ephemeral container
+ causes a pod to exceed its resource allocation,
+ the pod may be evicted. Ephemeral containers
+ may not be added by directly updating
+ the pod spec. They must be added via the
+ pod's ephemeralcontainers subresource,
+ and they will appear in the pod spec once
+ added. This is an alpha feature enabled
+ by the EphemeralContainers feature flag.
+ type: object
+ required:
+ - name
+ properties:
+ args:
+ description: 'Arguments to the entrypoint.
+ The docker image''s CMD is used if
+ this is not provided. Variable references
+ $(VAR_NAME) are expanded using the
+ container''s environment. If a variable
+ cannot be resolved, the reference
+ in the input string will be unchanged.
+ The $(VAR_NAME) syntax can be escaped
+ with a double $$, ie: $$(VAR_NAME).
+ Escaped references will never be expanded,
+ regardless of whether the variable
+ exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
+ type: array
+ items:
+ type: string
+ command:
+ description: 'Entrypoint array. Not
+ executed within a shell. The docker
+ image''s ENTRYPOINT is used if this
+ is not provided. Variable references
+ $(VAR_NAME) are expanded using the
+ container''s environment. If a variable
+ cannot be resolved, the reference
+ in the input string will be unchanged.
+ The $(VAR_NAME) syntax can be escaped
+ with a double $$, ie: $$(VAR_NAME).
+ Escaped references will never be expanded,
+ regardless of whether the variable
+ exists or not. Cannot be updated.
+ More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell'
+ type: array
+ items:
+ type: string
+ env:
+ description: List of environment variables
+ to set in the container. Cannot be
+ updated.
+ type: array
+ items:
+ description: EnvVar represents an
+ environment variable present in
+ a Container.
+ type: object
+ required:
+ - name
+ properties:
+ name:
+ description: Name of the environment
+ variable. Must be a C_IDENTIFIER.
+ type: string
+ value:
+ description: 'Variable references
+ $(VAR_NAME) are expanded using
+ the previously defined environment
+ variables in the container and
+ any service environment variables.
+ If a variable cannot be resolved,
+ the reference in the input string
+ will be unchanged.
The $(VAR_NAME) + syntax can be escaped with a + double $$, ie: $$(VAR_NAME). + Escaped references will never + be expanded, regardless of whether + the variable exists or not. + Defaults to "".' + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be + used if value is not empty. + type: object + properties: + configMapKeyRef: + description: Selects a key + of a ConfigMap. + type: object + required: + - key + properties: + key: + description: The key to + select. + type: string + name: + description: 'Name of + the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful + fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether + the ConfigMap or its + key must be defined + type: boolean + fieldRef: + description: 'Selects a field + of the pod: supports metadata.name, + metadata.namespace, metadata.labels, + metadata.annotations, spec.nodeName, + spec.serviceAccountName, + status.hostIP, status.podIP, + status.podIPs.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of + the schema the FieldPath + is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the + field to select in the + specified API version. + type: string + resourceFieldRef: + description: 'Selects a resource + of the container: only resources + limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory + and requests.ephemeral-storage) + are currently supported.' + type: object + required: + - resource + properties: + containerName: + description: 'Container + name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies + the output format of + the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + secretKeyRef: + description: Selects a key + of a secret in the pod's + namespace + type: object + required: + - key + properties: + key: + description: The key of + the secret to select + from. Must be a valid + secret key. + type: string + name: + description: 'Name of + the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful + fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether + the Secret or its key + must be defined + type: boolean + envFrom: + description: List of sources to populate + environment variables in the container. + The keys defined within a source must + be a C_IDENTIFIER. All invalid keys + will be reported as an event when + the container is starting. When a + key exists in multiple sources, the + value associated with the last source + will take precedence. Values defined + by an Env with a duplicate key will + take precedence. Cannot be updated. + type: array + items: + description: EnvFromSource represents + the source of a set of ConfigMaps + type: object + properties: + configMapRef: + description: The ConfigMap to + select from + type: object + properties: + name: + description: 'Name of the + referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + optional: + description: Specify whether + the ConfigMap must be defined + type: boolean + prefix: + description: An optional identifier + to prepend to each key in the + ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select + from + type: object + properties: + name: + description: 'Name of the + referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + optional: + description: Specify whether + the Secret must be defined + type: boolean + image: + description: 'Docker image name. More + info: https://kubernetes.io/docs/concepts/containers/images' + type: string + imagePullPolicy: + description: 'Image pull policy. One + of Always, Never, IfNotPresent. Defaults + to Always if :latest tag is specified, + or IfNotPresent otherwise. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Lifecycle is not allowed + for ephemeral containers. + type: object + properties: + postStart: + description: 'PostStart is called + immediately after a container + is created. If the handler fails, + the container is terminated and + restarted according to its restart + policy. Other management of the + container blocks until the hook + completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + type: object + properties: + exec: + description: One and only one + of the following should be + specified. Exec specifies + the action to take. + type: object + properties: + command: + description: Command is + the command line to execute + inside the container, + the working directory + for the command is root + ('/') in the container's + filesystem. The command + is simply exec'd, it is + not run inside a shell, + so traditional shell instructions + ('|', etc) won't work. + To use a shell, you need + to explicitly call out + to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + httpGet: + description: HTTPGet specifies + the http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to + connect to, defaults to + the pod IP. You probably + want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. + HTTP allows repeated headers. + type: array + items: + description: HTTPHeader + describes a custom header + to be used in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access + on the container. Number + must be in the range 1 + to 65535. Name must be + an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use + for connecting to the + host. Defaults to HTTP. + type: string + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP + port. 
TCP hooks not yet supported
+ TODO: implement a realistic
+ TCP lifecycle hook'
+ type: object
+ required:
+ - port
+ properties:
+ host:
+ description: 'Optional:
+ Host name to connect to,
+ defaults to the pod IP.'
+ type: string
+ port:
+ description: Number or name
+ of the port to access
+ on the container. Number
+ must be in the range 1
+ to 65535. Name must be
+ an IANA_SVC_NAME.
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ preStop:
+ description: 'PreStop is called
+ immediately before a container
+ is terminated due to an API request
+ or management event such as liveness/startup
+ probe failure, preemption, resource
+ contention, etc. The handler is
+ not called if the container crashes
+ or exits. The reason for termination
+ is passed to the handler. The
+ Pod''s termination grace period
+ countdown begins before the PreStop
+ hook is executed. Regardless
+ of the outcome of the handler,
+ the container will eventually
+ terminate within the Pod''s termination
+ grace period. Other management
+ of the container blocks until
+ the hook completes or until the
+ termination grace period is reached.
+ More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks'
+ type: object
+ properties:
+ exec:
+ description: One and only one
+ of the following should be
+ specified. Exec specifies
+ the action to take.
+ type: object
+ properties:
+ command:
+ description: Command is
+ the command line to execute
+ inside the container,
+ the working directory
+ for the command is root
+ ('/') in the container's
+ filesystem. The command
+ is simply exec'd, it is
+ not run inside a shell,
+ so traditional shell instructions
+ ('|', etc) won't work.
+ To use a shell, you need
+ to explicitly call out
+ to that shell. Exit status
+ of 0 is treated as live/healthy
+ and non-zero is unhealthy.
+ type: array
+ items:
+ type: string
+ httpGet:
+ description: HTTPGet specifies
+ the http request to perform.
+ type: object
+ required:
+ - port
+ properties:
+ host:
+ description: Host name to
+ connect to, defaults to
+ the pod IP. You probably
+ want to set "Host" in
+ httpHeaders instead.
+ type: string
+ httpHeaders:
+ description: Custom headers
+ to set in the request.
+ HTTP allows repeated headers.
+ type: array
+ items:
+ description: HTTPHeader
+ describes a custom header
+ to be used in HTTP probes
+ type: object
+ required:
+ - name
+ - value
+ properties:
+ name:
+ description: The header
+ field name
+ type: string
+ value:
+ description: The header
+ field value
+ type: string
+ path:
+ description: Path to access
+ on the HTTP server.
+ type: string
+ port:
+ description: Name or number
+ of the port to access
+ on the container. Number
+ must be in the range 1
+ to 65535. Name must be
+ an IANA_SVC_NAME.
+ anyOf:
+ - type: integer
+ - type: string
+ x-kubernetes-int-or-string: true
+ scheme:
+ description: Scheme to use
+ for connecting to the
+ host. Defaults to HTTP.
+ type: string
+ tcpSocket:
+ description: 'TCPSocket specifies
+ an action involving a TCP
+ port. TCP hooks not yet supported
+ TODO: implement a realistic
+ TCP lifecycle hook'
+ type: object
+ required:
+ - port
+ properties:
+ host:
+ description: 'Optional:
+ Host name to connect to,
+ defaults to the pod IP.'
+ type: string
+ port:
+ description: Number or name
+ of the port to access
+ on the container. Number
+ must be in the range 1
+ to 65535. Name must be
+ an IANA_SVC_NAME.
+ anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + livenessProbe: + description: Probes are not allowed + for ephemeral containers. + type: object + properties: + exec: + description: One and only one of + the following should be specified. + Exec specifies the action to take. + type: object + properties: + command: + description: Command is the + command line to execute inside + the container, the working + directory for the command is + root ('/') in the container's + filesystem. The command is + simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, + you need to explicitly call + out to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + failureThreshold: + description: Minimum consecutive + failures for the probe to be considered + failed after having succeeded. + Defaults to 3. Minimum value is + 1. + type: integer + format: int32 + httpGet: + description: HTTPGet specifies the + http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to connect + to, defaults to the pod IP. + You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. HTTP + allows repeated headers. + type: array + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for + connecting to the host. Defaults + to HTTP. + type: string + initialDelaySeconds: + description: 'Number of seconds + after the container has started + before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + periodSeconds: + description: How often (in seconds) + to perform the probe. Default + to 10 seconds. Minimum value is + 1. + type: integer + format: int32 + successThreshold: + description: Minimum consecutive + successes for the probe to be + considered successful after having + failed. Defaults to 1. Must be + 1 for liveness and startup. Minimum + value is 1. + type: integer + format: int32 + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP port. + TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle + hook' + type: object + required: + - port + properties: + host: + description: 'Optional: Host + name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + timeoutSeconds: + description: 'Number of seconds + after which the probe times out. + Defaults to 1 second. Minimum + value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + name: + description: Name of the ephemeral container + specified as a DNS_LABEL. This name + must be unique among all containers, + init containers and ephemeral containers. + type: string + ports: + description: Ports are not allowed for + ephemeral containers. + type: array + items: + description: ContainerPort represents + a network port in a single container. + type: object + required: + - containerPort + properties: + containerPort: + description: Number of port to + expose on the pod's IP address. + This must be a valid port number, + 0 < x < 65536. + type: integer + format: int32 + hostIP: + description: What host IP to bind + the external port to. + type: string + hostPort: + description: Number of port to + expose on the host. If specified, + this must be a valid port number, + 0 < x < 65536. If HostNetwork + is specified, this must match + ContainerPort. Most containers + do not need this. + type: integer + format: int32 + name: + description: If specified, this + must be an IANA_SVC_NAME and + unique within the pod. Each + named port in a pod must have + a unique name. Name for the + port that can be referred to + by services. + type: string + protocol: + description: Protocol for port. + Must be UDP, TCP, or SCTP. Defaults + to "TCP". + type: string + readinessProbe: + description: Probes are not allowed + for ephemeral containers. + type: object + properties: + exec: + description: One and only one of + the following should be specified. + Exec specifies the action to take. + type: object + properties: + command: + description: Command is the + command line to execute inside + the container, the working + directory for the command is + root ('/') in the container's + filesystem. The command is + simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, + you need to explicitly call + out to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + failureThreshold: + description: Minimum consecutive + failures for the probe to be considered + failed after having succeeded. + Defaults to 3. Minimum value is + 1. + type: integer + format: int32 + httpGet: + description: HTTPGet specifies the + http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to connect + to, defaults to the pod IP. + You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. HTTP + allows repeated headers. + type: array + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for + connecting to the host. Defaults + to HTTP. 
+ type: string + initialDelaySeconds: + description: 'Number of seconds + after the container has started + before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + periodSeconds: + description: How often (in seconds) + to perform the probe. Default + to 10 seconds. Minimum value is + 1. + type: integer + format: int32 + successThreshold: + description: Minimum consecutive + successes for the probe to be + considered successful after having + failed. Defaults to 1. Must be + 1 for liveness and startup. Minimum + value is 1. + type: integer + format: int32 + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP port. + TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle + hook' + type: object + required: + - port + properties: + host: + description: 'Optional: Host + name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + timeoutSeconds: + description: 'Number of seconds + after which the probe times out. + Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + resources: + description: Resources are not allowed + for ephemeral containers. Ephemeral + containers use spare resources already + allocated to the pod. + type: object + properties: + limits: + description: 'Limits describes the + maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: 'Requests describes + the minimum amount of compute + resources required. If Requests + is omitted for a container, it + defaults to Limits if that is + explicitly specified, otherwise + to an implementation-defined value. + More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + securityContext: + description: SecurityContext is not + allowed for ephemeral containers. + type: object + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation + controls whether a process can + gain more privileges than its + parent process. This bool directly + controls if the no_new_privs flag + will be set on the container process. + AllowPrivilegeEscalation is true + always when the container is: + 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to + add/drop when running containers. + Defaults to the default set of + capabilities granted by the container + runtime. 
+ type: object + properties: + add: + description: Added capabilities + type: array + items: + description: Capability represent + POSIX capabilities type + type: string + drop: + description: Removed capabilities + type: array + items: + description: Capability represent + POSIX capabilities type + type: string + privileged: + description: Run container in privileged + mode. Processes in privileged + containers are essentially equivalent + to root on the host. Defaults + to false. + type: boolean + procMount: + description: procMount denotes the + type of proc mount to use for + the containers. The default is + DefaultProcMount which uses the + container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType + feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container + has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the + entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If + set in both SecurityContext and + PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: integer + format: int64 + runAsNonRoot: + description: Indicates that the + container must run as a non-root + user. If true, the Kubelet will + validate the image at runtime + to ensure that it does not run + as UID 0 (root) and fail to start + the container if it does. If unset + or false, no such validation will + be performed. May also be set + in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. + type: boolean + runAsUser: + description: The UID to run the + entrypoint of the container process. + Defaults to user specified in + image metadata if unspecified. + May also be set in PodSecurityContext. If + set in both SecurityContext and + PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: integer + format: int64 + seLinuxOptions: + description: The SELinux context + to be applied to the container. + If unspecified, the container + runtime will allocate a random + SELinux context for each container. May + also be set in PodSecurityContext. If + set in both SecurityContext and + PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: object + properties: + level: + description: Level is SELinux + level label that applies to + the container. + type: string + role: + description: Role is a SELinux + role label that applies to + the container. + type: string + type: + description: Type is a SELinux + type label that applies to + the container. + type: string + user: + description: User is a SELinux + user label that applies to + the container. + type: string + windowsOptions: + description: The Windows specific + settings applied to all containers. + If unspecified, the options from + the PodSecurityContext will be + used. If set in both SecurityContext + and PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: object + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec + is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the + GMSA credential spec named + by the GMSACredentialSpecName + field. This field is alpha-level + and is only honored by servers + that enable the WindowsGMSA + feature flag. 
+ type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. This field is + alpha-level and is only honored + by servers that enable the + WindowsGMSA feature flag. + type: string + runAsUserName: + description: The UserName in + Windows to run the entrypoint + of the container process. + Defaults to the user specified + in image metadata if unspecified. + May also be set in PodSecurityContext. + If set in both SecurityContext + and PodSecurityContext, the + value specified in SecurityContext + takes precedence. This field + is beta-level and may be disabled + with the WindowsRunAsUserName + feature flag. + type: string + startupProbe: + description: Probes are not allowed + for ephemeral containers. + type: object + properties: + exec: + description: One and only one of + the following should be specified. + Exec specifies the action to take. + type: object + properties: + command: + description: Command is the + command line to execute inside + the container, the working + directory for the command is + root ('/') in the container's + filesystem. The command is + simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, + you need to explicitly call + out to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + failureThreshold: + description: Minimum consecutive + failures for the probe to be considered + failed after having succeeded. + Defaults to 3. Minimum value is + 1. + type: integer + format: int32 + httpGet: + description: HTTPGet specifies the + http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to connect + to, defaults to the pod IP. + You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. HTTP + allows repeated headers. + type: array + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for + connecting to the host. Defaults + to HTTP. + type: string + initialDelaySeconds: + description: 'Number of seconds + after the container has started + before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + periodSeconds: + description: How often (in seconds) + to perform the probe. Default + to 10 seconds. Minimum value is + 1. + type: integer + format: int32 + successThreshold: + description: Minimum consecutive + successes for the probe to be + considered successful after having + failed. Defaults to 1. Must be + 1 for liveness and startup. Minimum + value is 1. + type: integer + format: int32 + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP port. 
+ TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle + hook' + type: object + required: + - port + properties: + host: + description: 'Optional: Host + name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + timeoutSeconds: + description: 'Number of seconds + after which the probe times out. + Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + stdin: + description: Whether this container + should allocate a buffer for stdin + in the container runtime. If this + is not set, reads from stdin in the + container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime + should close the stdin channel after + it has been opened by a single attach. + When stdin is true the stdin stream + will remain open across multiple attach + sessions. If stdinOnce is set to true, + stdin is opened on container start, + is empty until the first client attaches + to stdin, and then remains open and + accepts data until the client disconnects, + at which time stdin is closed and + remains closed until the container + is restarted. If this flag is false, + a container process that reads from + stdin will never receive an EOF. Default + is false. + type: boolean + targetContainerName: + description: If set, the name of the + container from PodSpec that this ephemeral + container targets. The ephemeral container + will be run in the namespaces (IPC, + PID, etc) of this container. If not + set then the ephemeral container is + run in whatever namespaces are shared + for the pod. Note that the container + runtime must support this feature. + type: string + terminationMessagePath: + description: 'Optional: Path at which + the file to which the container''s + termination message will be written + is mounted into the container''s filesystem. + Message written is intended to be + brief final status, such as an assertion + failure message. Will be truncated + by the node if greater than 4096 bytes. + The total message length across all + containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination + message should be populated. File + will use the contents of terminationMessagePath + to populate the container status message + on both success and failure. FallbackToLogsOnError + will use the last chunk of container + log output if the termination message + file is empty and the container exited + with an error. The log output is limited + to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot + be updated. + type: string + tty: + description: Whether this container + should allocate a TTY for itself, + also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list + of block devices to be used by the + container. This is a beta feature. + type: array + items: + description: volumeDevice describes + a mapping of a raw block device + within a container.
+ type: object + required: + - devicePath + - name + properties: + devicePath: + description: devicePath is the + path inside of the container + that the device will be mapped + to. + type: string + name: + description: name must match the + name of a persistentVolumeClaim + in the pod + type: string + volumeMounts: + description: Pod volumes to mount into + the container's filesystem. Cannot + be updated. + type: array + items: + description: VolumeMount describes + a mounting of a Volume within a + container. + type: object + required: + - mountPath + - name + properties: + mountPath: + description: Path within the container + at which the volume should be + mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation + determines how mounts are propagated + from the host to container and + the other way around. When not + set, MountPropagationNone is + used. This field is beta in + 1.10. + type: string + name: + description: This must match the + Name of a Volume. + type: string + readOnly: + description: Mounted read-only + if true, read-write otherwise + (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume + from which the container's volume + should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within + the volume from which the container's + volume should be mounted. Behaves + similarly to SubPath but environment + variable references $(VAR_NAME) + are expanded using the container's + environment. Defaults to "" + (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + workingDir: + description: Container's working directory. + If not specified, the container runtime's + default will be used, which might + be configured in the container image. + Cannot be updated. + type: string + hostAliases: + description: HostAliases is an optional list + of hosts and IPs that will be injected into + the pod's hosts file if specified. This + is only valid for non-hostNetwork pods. + type: array + items: + description: HostAlias holds the mapping + between IP and hostnames that will be + injected as an entry in the pod's hosts + file. + type: object + properties: + hostnames: + description: Hostnames for the above + IP address. + type: array + items: + type: string + ip: + description: IP address of the host + file entry. + type: string + hostIPC: + description: 'Use the host''s ipc namespace. + Optional: Default to false.' + type: boolean + hostNetwork: + description: Host networking requested for + this pod. Use the host's network namespace. + If this option is set, the ports that will + be used must be specified. Default to false. + type: boolean + hostPID: + description: 'Use the host''s pid namespace. + Optional: Default to false.' + type: boolean + hostname: + description: Specifies the hostname of the + Pod. If not specified, the pod's hostname + will be set to a system-defined value. + type: string + imagePullSecrets: + description: 'ImagePullSecrets is an optional + list of references to secrets in the same + namespace to use for pulling any of the + images used by this PodSpec. If specified, + these secrets will be passed to individual + puller implementations for them to use. + For example, in the case of docker, only + DockerConfig type secrets are honored.
More + info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod' + type: array + items: + description: LocalObjectReference contains + enough information to let you locate the + referenced object inside the same namespace. + type: object + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + initContainers: + description: 'List of initialization containers + belonging to the pod. Init containers are + executed in order prior to containers being + started. If any init container fails, the + pod is considered to have failed and is + handled according to its restartPolicy. + The name for an init container or normal + container must be unique among all containers. + Init containers may not have Lifecycle actions, + Readiness probes, Liveness probes, or Startup + probes. The resourceRequirements of an init + container are taken into account during + scheduling by finding the highest request/limit + for each resource type, and then using the + max of that value or the sum of the normal + containers. Limits are applied to init containers + in a similar fashion. Init containers cannot + currently be added or removed. Cannot be + updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/' + type: array + items: + description: A single application container + that you want to run within a pod. + type: object + required: + - name + properties: + args: + description: 'Arguments to the entrypoint. + The docker image''s CMD is used if + this is not provided. Variable references + $(VAR_NAME) are expanded using the + container''s environment. If a variable + cannot be resolved, the reference + in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped + with a double $$, ie: $$(VAR_NAME). + Escaped references will never be expanded, + regardless of whether the variable + exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + type: array + items: + type: string + command: + description: 'Entrypoint array. Not + executed within a shell. The docker + image''s ENTRYPOINT is used if this + is not provided. Variable references + $(VAR_NAME) are expanded using the + container''s environment. If a variable + cannot be resolved, the reference + in the input string will be unchanged. + The $(VAR_NAME) syntax can be escaped + with a double $$, ie: $$(VAR_NAME). + Escaped references will never be expanded, + regardless of whether the variable + exists or not. Cannot be updated. + More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell' + type: array + items: + type: string + env: + description: List of environment variables + to set in the container. Cannot be + updated. + type: array + items: + description: EnvVar represents an + environment variable present in + a Container. + type: object + required: + - name + properties: + name: + description: Name of the environment + variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references + $(VAR_NAME) are expanded using + the previously defined environment + variables in the container and + any service environment variables.
+ If a variable cannot be resolved, + the reference in the input string + will be unchanged. The $(VAR_NAME) + syntax can be escaped with a + double $$, ie: $$(VAR_NAME). + Escaped references will never + be expanded, regardless of whether + the variable exists or not. + Defaults to "".' + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be + used if value is not empty. + type: object + properties: + configMapKeyRef: + description: Selects a key + of a ConfigMap. + type: object + required: + - key + properties: + key: + description: The key to + select. + type: string + name: + description: 'Name of + the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful + fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether + the ConfigMap or its + key must be defined + type: boolean + fieldRef: + description: 'Selects a field + of the pod: supports metadata.name, + metadata.namespace, metadata.labels, + metadata.annotations, spec.nodeName, + spec.serviceAccountName, + status.hostIP, status.podIP, + status.podIPs.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of + the schema the FieldPath + is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the + field to select in the + specified API version. + type: string + resourceFieldRef: + description: 'Selects a resource + of the container: only resources + limits and requests (limits.cpu, + limits.memory, limits.ephemeral-storage, + requests.cpu, requests.memory + and requests.ephemeral-storage) + are currently supported.' + type: object + required: + - resource + properties: + containerName: + description: 'Container + name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies + the output format of + the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + secretKeyRef: + description: Selects a key + of a secret in the pod's + namespace + type: object + required: + - key + properties: + key: + description: The key of + the secret to select + from. Must be a valid + secret key. + type: string + name: + description: 'Name of + the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful + fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether + the Secret or its key + must be defined + type: boolean + envFrom: + description: List of sources to populate + environment variables in the container. + The keys defined within a source must + be a C_IDENTIFIER. All invalid keys + will be reported as an event when + the container is starting. When a + key exists in multiple sources, the + value associated with the last source + will take precedence. Values defined + by an Env with a duplicate key will + take precedence. Cannot be updated. + type: array + items: + description: EnvFromSource represents + the source of a set of ConfigMaps + type: object + properties: + configMapRef: + description: The ConfigMap to + select from + type: object + properties: + name: + description: 'Name of the + referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + optional: + description: Specify whether + the ConfigMap must be defined + type: boolean + prefix: + description: An optional identifier + to prepend to each key in the + ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select + from + type: object + properties: + name: + description: 'Name of the + referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + optional: + description: Specify whether + the Secret must be defined + type: boolean + image: + description: 'Docker image name. More + info: https://kubernetes.io/docs/concepts/containers/images + This field is optional to allow higher + level config management to default + or override container images in workload + controllers like Deployments and StatefulSets.' + type: string + imagePullPolicy: + description: 'Image pull policy. One + of Always, Never, IfNotPresent. Defaults + to Always if :latest tag is specified, + or IfNotPresent otherwise. Cannot + be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images' + type: string + lifecycle: + description: Actions that the management + system should take in response to + container lifecycle events. Cannot + be updated. + type: object + properties: + postStart: + description: 'PostStart is called + immediately after a container + is created. If the handler fails, + the container is terminated and + restarted according to its restart + policy. Other management of the + container blocks until the hook + completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + type: object + properties: + exec: + description: One and only one + of the following should be + specified. Exec specifies + the action to take. + type: object + properties: + command: + description: Command is + the command line to execute + inside the container, + the working directory + for the command is root + ('/') in the container's + filesystem. The command + is simply exec'd, it is + not run inside a shell, + so traditional shell instructions + ('|', etc) won't work. + To use a shell, you need + to explicitly call out + to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + httpGet: + description: HTTPGet specifies + the http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to + connect to, defaults to + the pod IP. You probably + want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. + HTTP allows repeated headers. + type: array + items: + description: HTTPHeader + describes a custom header + to be used in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access + on the container. Number + must be in the range 1 + to 65535. Name must be + an IANA_SVC_NAME. 
+ anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use + for connecting to the + host. Defaults to HTTP. + type: string + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP + port. TCP hooks not yet supported + TODO: implement a realistic + TCP lifecycle hook' + type: object + required: + - port + properties: + host: + description: 'Optional: + Host name to connect to, + defaults to the pod IP.' + type: string + port: + description: Number or name + of the port to access + on the container. Number + must be in the range 1 + to 65535. Name must be + an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + preStop: + description: 'PreStop is called + immediately before a container + is terminated due to an API request + or management event such as liveness/startup + probe failure, preemption, resource + contention, etc. The handler is + not called if the container crashes + or exits. The reason for termination + is passed to the handler. The + Pod''s termination grace period + countdown begins before the PreStop + hook is executed. Regardless + of the outcome of the handler, + the container will eventually + terminate within the Pod''s termination + grace period. Other management + of the container blocks until + the hook completes or until the + termination grace period is reached. + More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks' + type: object + properties: + exec: + description: One and only one + of the following should be + specified. Exec specifies + the action to take. + type: object + properties: + command: + description: Command is + the command line to execute + inside the container, + the working directory + for the command is root + ('/') in the container's + filesystem. The command + is simply exec'd, it is + not run inside a shell, + so traditional shell instructions + ('|', etc) won't work. + To use a shell, you need + to explicitly call out + to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + httpGet: + description: HTTPGet specifies + the http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to + connect to, defaults to + the pod IP. You probably + want to set "Host" in + httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. + HTTP allows repeated headers. + type: array + items: + description: HTTPHeader + describes a custom header + to be used in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access + on the container. Number + must be in the range 1 + to 65535. Name must be + an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use + for connecting to the + host. Defaults to HTTP. + type: string + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP + port.
TCP hooks not yet supported + TODO: implement a realistic + TCP lifecycle hook' + type: object + required: + - port + properties: + host: + description: 'Optional: + Host name to connect to, + defaults to the pod IP.' + type: string + port: + description: Number or name + of the port to access + on the container. Number + must be in the range 1 + to 65535. Name must be + an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + livenessProbe: + description: 'Periodic probe of container + liveness. Container will be restarted + if the probe fails. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: object + properties: + exec: + description: One and only one of + the following should be specified. + Exec specifies the action to take. + type: object + properties: + command: + description: Command is the + command line to execute inside + the container, the working + directory for the command is + root ('/') in the container's + filesystem. The command is + simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, + you need to explicitly call + out to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + failureThreshold: + description: Minimum consecutive + failures for the probe to be considered + failed after having succeeded. + Defaults to 3. Minimum value is + 1. + type: integer + format: int32 + httpGet: + description: HTTPGet specifies the + http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to connect + to, defaults to the pod IP. + You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. HTTP + allows repeated headers. + type: array + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for + connecting to the host. Defaults + to HTTP. + type: string + initialDelaySeconds: + description: 'Number of seconds + after the container has started + before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + periodSeconds: + description: How often (in seconds) + to perform the probe. Default + to 10 seconds. Minimum value is + 1. + type: integer + format: int32 + successThreshold: + description: Minimum consecutive + successes for the probe to be + considered successful after having + failed. Defaults to 1. Must be + 1 for liveness and startup. Minimum + value is 1. + type: integer + format: int32 + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP port. 
+ TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle + hook' + type: object + required: + - port + properties: + host: + description: 'Optional: Host + name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + timeoutSeconds: + description: 'Number of seconds + after which the probe times out. + Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + name: + description: Name of the container specified + as a DNS_LABEL. Each container in + a pod must have a unique name (DNS_LABEL). + Cannot be updated. + type: string + ports: + description: List of ports to expose + from the container. Exposing a port + here gives the system additional information + about the network connections a container + uses, but is primarily informational. + Not specifying a port here DOES NOT + prevent that port from being exposed. + Any port which is listening on the + default "0.0.0.0" address inside a + container will be accessible from + the network. Cannot be updated. + type: array + items: + description: ContainerPort represents + a network port in a single container. + type: object + required: + - containerPort + properties: + containerPort: + description: Number of port to + expose on the pod's IP address. + This must be a valid port number, + 0 < x < 65536. + type: integer + format: int32 + hostIP: + description: What host IP to bind + the external port to. + type: string + hostPort: + description: Number of port to + expose on the host. If specified, + this must be a valid port number, + 0 < x < 65536. If HostNetwork + is specified, this must match + ContainerPort. Most containers + do not need this. + type: integer + format: int32 + name: + description: If specified, this + must be an IANA_SVC_NAME and + unique within the pod. Each + named port in a pod must have + a unique name. Name for the + port that can be referred to + by services. + type: string + protocol: + description: Protocol for port. + Must be UDP, TCP, or SCTP. Defaults + to "TCP". + type: string + x-kubernetes-list-map-keys: + - containerPort + - protocol + x-kubernetes-list-type: map + readinessProbe: + description: 'Periodic probe of container + service readiness. Container will + be removed from service endpoints + if the probe fails. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: object + properties: + exec: + description: One and only one of + the following should be specified. + Exec specifies the action to take. + type: object + properties: + command: + description: Command is the + command line to execute inside + the container, the working + directory for the command is + root ('/') in the container's + filesystem. The command is + simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, + you need to explicitly call + out to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + failureThreshold: + description: Minimum consecutive + failures for the probe to be considered + failed after having succeeded. + Defaults to 3. Minimum value is + 1. 
+ type: integer + format: int32 + httpGet: + description: HTTPGet specifies the + http request to perform. + type: object + required: + - port + properties: + host: + description: Host name to connect + to, defaults to the pod IP. + You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. HTTP + allows repeated headers. + type: array + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for + connecting to the host. Defaults + to HTTP. + type: string + initialDelaySeconds: + description: 'Number of seconds + after the container has started + before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + periodSeconds: + description: How often (in seconds) + to perform the probe. Default + to 10 seconds. Minimum value is + 1. + type: integer + format: int32 + successThreshold: + description: Minimum consecutive + successes for the probe to be + considered successful after having + failed. Defaults to 1. Must be + 1 for liveness and startup. Minimum + value is 1. + type: integer + format: int32 + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP port. + TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle + hook' + type: object + required: + - port + properties: + host: + description: 'Optional: Host + name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + timeoutSeconds: + description: 'Number of seconds + after which the probe times out. + Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + resources: + description: 'Compute Resources required + by this container. Cannot be updated. + More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + properties: + limits: + description: 'Limits describes the + maximum amount of compute resources + allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: 'Requests describes + the minimum amount of compute + resources required. If Requests + is omitted for a container, it + defaults to Limits if that is + explicitly specified, otherwise + to an implementation-defined value. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + securityContext: + description: 'Security options the pod + should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ + More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/' + type: object + properties: + allowPrivilegeEscalation: + description: 'AllowPrivilegeEscalation + controls whether a process can + gain more privileges than its + parent process. This bool directly + controls if the no_new_privs flag + will be set on the container process. + AllowPrivilegeEscalation is true + always when the container is: + 1) run as Privileged 2) has CAP_SYS_ADMIN' + type: boolean + capabilities: + description: The capabilities to + add/drop when running containers. + Defaults to the default set of + capabilities granted by the container + runtime. + type: object + properties: + add: + description: Added capabilities + type: array + items: + description: Capability represent + POSIX capabilities type + type: string + drop: + description: Removed capabilities + type: array + items: + description: Capability represent + POSIX capabilities type + type: string + privileged: + description: Run container in privileged + mode. Processes in privileged + containers are essentially equivalent + to root on the host. Defaults + to false. + type: boolean + procMount: + description: procMount denotes the + type of proc mount to use for + the containers. The default is + DefaultProcMount which uses the + container runtime defaults for + readonly paths and masked paths. + This requires the ProcMountType + feature flag to be enabled. + type: string + readOnlyRootFilesystem: + description: Whether this container + has a read-only root filesystem. + Default is false. + type: boolean + runAsGroup: + description: The GID to run the + entrypoint of the container process. + Uses runtime default if unset. + May also be set in PodSecurityContext. If + set in both SecurityContext and + PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: integer + format: int64 + runAsNonRoot: + description: Indicates that the + container must run as a non-root + user. If true, the Kubelet will + validate the image at runtime + to ensure that it does not run + as UID 0 (root) and fail to start + the container if it does. If unset + or false, no such validation will + be performed. May also be set + in PodSecurityContext. If set + in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. + type: boolean + runAsUser: + description: The UID to run the + entrypoint of the container process. + Defaults to user specified in + image metadata if unspecified. + May also be set in PodSecurityContext. If + set in both SecurityContext and + PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: integer + format: int64 + seLinuxOptions: + description: The SELinux context + to be applied to the container. + If unspecified, the container + runtime will allocate a random + SELinux context for each container. May + also be set in PodSecurityContext. 
If + set in both SecurityContext and + PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: object + properties: + level: + description: Level is SELinux + level label that applies to + the container. + type: string + role: + description: Role is a SELinux + role label that applies to + the container. + type: string + type: + description: Type is a SELinux + type label that applies to + the container. + type: string + user: + description: User is a SELinux + user label that applies to + the container. + type: string + windowsOptions: + description: The Windows specific + settings applied to all containers. + If unspecified, the options from + the PodSecurityContext will be + used. If set in both SecurityContext + and PodSecurityContext, the value + specified in SecurityContext takes + precedence. + type: object + properties: + gmsaCredentialSpec: + description: GMSACredentialSpec + is where the GMSA admission + webhook (https://github.com/kubernetes-sigs/windows-gmsa) + inlines the contents of the + GMSA credential spec named + by the GMSACredentialSpecName + field. This field is alpha-level + and is only honored by servers + that enable the WindowsGMSA + feature flag. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName + is the name of the GMSA credential + spec to use. This field is + alpha-level and is only honored + by servers that enable the + WindowsGMSA feature flag. + type: string + runAsUserName: + description: The UserName in + Windows to run the entrypoint + of the container process. + Defaults to the user specified + in image metadata if unspecified. + May also be set in PodSecurityContext. + If set in both SecurityContext + and PodSecurityContext, the + value specified in SecurityContext + takes precedence. This field + is beta-level and may be disabled + with the WindowsRunAsUserName + feature flag. + type: string + startupProbe: + description: 'StartupProbe indicates + that the Pod has successfully initialized. + If specified, no other probes are + executed until this completes successfully. + If this probe fails, the Pod will + be restarted, just as if the livenessProbe + failed. This can be used to provide + different probe parameters at the + beginning of a Pod''s lifecycle, when + it might take a long time to load + data or warm a cache, than during + steady-state operation. This cannot + be updated. This is an alpha feature + enabled by the StartupProbe feature + flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: object + properties: + exec: + description: One and only one of + the following should be specified. + Exec specifies the action to take. + type: object + properties: + command: + description: Command is the + command line to execute inside + the container, the working + directory for the command is + root ('/') in the container's + filesystem. The command is + simply exec'd, it is not run + inside a shell, so traditional + shell instructions ('|', etc) + won't work. To use a shell, + you need to explicitly call + out to that shell. Exit status + of 0 is treated as live/healthy + and non-zero is unhealthy. + type: array + items: + type: string + failureThreshold: + description: Minimum consecutive + failures for the probe to be considered + failed after having succeeded. + Defaults to 3. Minimum value is + 1. + type: integer + format: int32 + httpGet: + description: HTTPGet specifies the + http request to perform. 
+ type: object + required: + - port + properties: + host: + description: Host name to connect + to, defaults to the pod IP. + You probably want to set "Host" + in httpHeaders instead. + type: string + httpHeaders: + description: Custom headers + to set in the request. HTTP + allows repeated headers. + type: array + items: + description: HTTPHeader describes + a custom header to be used + in HTTP probes + type: object + required: + - name + - value + properties: + name: + description: The header + field name + type: string + value: + description: The header + field value + type: string + path: + description: Path to access + on the HTTP server. + type: string + port: + description: Name or number + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + description: Scheme to use for + connecting to the host. Defaults + to HTTP. + type: string + initialDelaySeconds: + description: 'Number of seconds + after the container has started + before liveness probes are initiated. + More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + periodSeconds: + description: How often (in seconds) + to perform the probe. Default + to 10 seconds. Minimum value is + 1. + type: integer + format: int32 + successThreshold: + description: Minimum consecutive + successes for the probe to be + considered successful after having + failed. Defaults to 1. Must be + 1 for liveness and startup. Minimum + value is 1. + type: integer + format: int32 + tcpSocket: + description: 'TCPSocket specifies + an action involving a TCP port. + TCP hooks not yet supported TODO: + implement a realistic TCP lifecycle + hook' + type: object + required: + - port + properties: + host: + description: 'Optional: Host + name to connect to, defaults + to the pod IP.' + type: string + port: + description: Number or name + of the port to access on the + container. Number must be + in the range 1 to 65535. Name + must be an IANA_SVC_NAME. + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + timeoutSeconds: + description: 'Number of seconds + after which the probe times out. + Defaults to 1 second. Minimum + value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes' + type: integer + format: int32 + stdin: + description: Whether this container + should allocate a buffer for stdin + in the container runtime. If this + is not set, reads from stdin in the + container will always result in EOF. + Default is false. + type: boolean + stdinOnce: + description: Whether the container runtime + should close the stdin channel after + it has been opened by a single attach. + When stdin is true the stdin stream + will remain open across multiple attach + sessions. If stdinOnce is set to true, + stdin is opened on container start, + is empty until the first client attaches + to stdin, and then remains open and + accepts data until the client disconnects, + at which time stdin is closed and + remains closed until the container + is restarted. If this flag is false, + a container process that reads from + stdin will never receive an EOF. Default + is false. + type: boolean + terminationMessagePath: + description: 'Optional: Path at which + the file to which the container''s + termination message will be written + is mounted into the container''s filesystem.
+ Message written is intended to be + brief final status, such as an assertion + failure message. Will be truncated + by the node if greater than 4096 bytes. + The total message length across all + containers will be limited to 12kb. + Defaults to /dev/termination-log. + Cannot be updated.' + type: string + terminationMessagePolicy: + description: Indicate how the termination + message should be populated. File + will use the contents of terminationMessagePath + to populate the container status message + on both success and failure. FallbackToLogsOnError + will use the last chunk of container + log output if the termination message + file is empty and the container exited + with an error. The log output is limited + to 2048 bytes or 80 lines, whichever + is smaller. Defaults to File. Cannot + be updated. + type: string + tty: + description: Whether this container + should allocate a TTY for itself, + also requires 'stdin' to be true. + Default is false. + type: boolean + volumeDevices: + description: volumeDevices is the list + of block devices to be used by the + container. This is a beta feature. + type: array + items: + description: volumeDevice describes + a mapping of a raw block device + within a container. + type: object + required: + - devicePath + - name + properties: + devicePath: + description: devicePath is the + path inside of the container + that the device will be mapped + to. + type: string + name: + description: name must match the + name of a persistentVolumeClaim + in the pod + type: string + volumeMounts: + description: Pod volumes to mount into + the container's filesystem. Cannot + be updated. + type: array + items: + description: VolumeMount describes + a mounting of a Volume within a + container. + type: object + required: + - mountPath + - name + properties: + mountPath: + description: Path within the container + at which the volume should be + mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation + determines how mounts are propagated + from the host to container and + the other way around. When not + set, MountPropagationNone is + used. This field is beta in + 1.10. + type: string + name: + description: This must match the + Name of a Volume. + type: string + readOnly: + description: Mounted read-only + if true, read-write otherwise + (false or unspecified). Defaults + to false. + type: boolean + subPath: + description: Path within the volume + from which the container's volume + should be mounted. Defaults + to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within + the volume from which the container's + volume should be mounted. Behaves + similarly to SubPath but environment + variable references $(VAR_NAME) + are expanded using the container's + environment. Defaults to "" + (volume's root). SubPathExpr + and SubPath are mutually exclusive. + type: string + workingDir: + description: Container's working directory. + If not specified, the container runtime's + default will be used, which might + be configured in the container image. + Cannot be updated. + type: string + nodeName: + description: NodeName is a request to schedule + this pod onto a specific node. If it is + non-empty, the scheduler simply schedules + this pod onto that node, assuming that it + fits resource requirements. + type: string + nodeSelector: + description: 'NodeSelector is a selector which + must be true for the pod to fit on a node. 
+ Selector which must match a node''s labels + for the pod to be scheduled on that node. + More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + overhead: + description: 'Overhead represents the resource + overhead associated with running a pod for + a given RuntimeClass. This field will be + autopopulated at admission time by the RuntimeClass + admission controller. If the RuntimeClass + admission controller is enabled, overhead + must not be set in Pod create requests. + The RuntimeClass admission controller will + reject Pod create requests which have the + overhead already set. If RuntimeClass is + configured and selected in the PodSpec, + Overhead will be set to the value defined + in the corresponding RuntimeClass, otherwise + it will remain unset and treated as zero. + More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + This field is alpha-level as of Kubernetes + v1.16, and is only honored by servers that + enable the PodOverhead feature.' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + preemptionPolicy: + description: PreemptionPolicy is the Policy + for preempting pods with lower priority. + One of Never, PreemptLowerPriority. Defaults + to PreemptLowerPriority if unset. This field + is alpha-level and is only honored by servers + that enable the NonPreemptingPriority feature. + type: string + priority: + description: The priority value. Various system + components use this field to find the priority + of the pod. When Priority Admission Controller + is enabled, it prevents users from setting + this field. The admission controller populates + this field from PriorityClassName. The higher + the value, the higher the priority. + type: integer + format: int32 + priorityClassName: + description: If specified, indicates the pod's + priority. "system-node-critical" and "system-cluster-critical" + are two special keywords which indicate + the highest priorities with the former being + the highest priority. Any other name must + be defined by creating a PriorityClass object + with that name. If not specified, the pod + priority will be default or zero if there + is no default. + type: string + readinessGates: + description: 'If specified, all readiness + gates will be evaluated for pod readiness. + A pod is ready when all its containers are + ready AND all conditions specified in the + readiness gates have status equal to "True" + More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md' + type: array + items: + description: PodReadinessGate contains the + reference to a pod condition + type: object + required: + - conditionType + properties: + conditionType: + description: ConditionType refers to + a condition in the pod's condition + list with matching type. + type: string + restartPolicy: + description: 'Restart policy for all containers + within the pod. One of Always, OnFailure, + Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy' + type: string + runtimeClassName: + description: 'RuntimeClassName refers to a + RuntimeClass object in the node.k8s.io group, + which should be used to run this pod. If + no RuntimeClass resource matches the named + class, the pod will not be run. 
If unset + or empty, the "legacy" RuntimeClass will + be used, which is an implicit class with + an empty definition that uses the default + runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + This is a beta feature as of Kubernetes + v1.14.' + type: string + schedulerName: + description: If specified, the pod will be + dispatched by specified scheduler. If not + specified, the pod will be dispatched by + default scheduler. + type: string + securityContext: + description: 'SecurityContext holds pod-level + security attributes and common container + settings. Optional: Defaults to empty. See + type description for default values of each + field.' + type: object + properties: + fsGroup: + description: "A special supplemental group + that applies to all containers in a + pod. Some volume types allow the Kubelet + to change the ownership of that volume + to be owned by the pod: \n 1. The owning + GID will be the FSGroup 2. The setgid + bit is set (new files created in the + volume will be owned by FSGroup) 3. + The permission bits are OR'd with rw-rw---- + \n If unset, the Kubelet will not modify + the ownership and permissions of any + volume." + type: integer + format: int64 + runAsGroup: + description: The GID to run the entrypoint + of the container process. Uses runtime + default if unset. May also be set in + SecurityContext. If set in both SecurityContext + and PodSecurityContext, the value specified + in SecurityContext takes precedence + for that container. + type: integer + format: int64 + runAsNonRoot: + description: Indicates that the container + must run as a non-root user. If true, + the Kubelet will validate the image + at runtime to ensure that it does not + run as UID 0 (root) and fail to start + the container if it does. If unset or + false, no such validation will be performed. + May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence. + type: boolean + runAsUser: + description: The UID to run the entrypoint + of the container process. Defaults to + user specified in image metadata if + unspecified. May also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence for that container. + type: integer + format: int64 + seLinuxOptions: + description: The SELinux context to be + applied to all containers. If unspecified, + the container runtime will allocate + a random SELinux context for each container. May + also be set in SecurityContext. If + set in both SecurityContext and PodSecurityContext, + the value specified in SecurityContext + takes precedence for that container. + type: object + properties: + level: + description: Level is SELinux level + label that applies to the container. + type: string + role: + description: Role is a SELinux role + label that applies to the container. + type: string + type: + description: Type is a SELinux type + label that applies to the container. + type: string + user: + description: User is a SELinux user + label that applies to the container. + type: string + supplementalGroups: + description: A list of groups applied + to the first process run in each container, + in addition to the container's primary + GID. If unspecified, no groups will + be added to any container. + type: array + items: + type: integer + format: int64 + sysctls: + description: Sysctls hold a list of namespaced + sysctls used for the pod. 
Pods with
+ unsupported sysctls (by the container
+ runtime) might fail to launch.
+ type: array
+ items:
+ description: Sysctl defines a kernel
+ parameter to be set
+ type: object
+ required:
+ - name
+ - value
+ properties:
+ name:
+ description: Name of a property
+ to set
+ type: string
+ value:
+ description: Value of a property
+ to set
+ type: string
+ windowsOptions:
+ description: The Windows specific settings
+ applied to all containers. If unspecified,
+ the options within a container's SecurityContext
+ will be used. If set in both SecurityContext
+ and PodSecurityContext, the value specified
+ in SecurityContext takes precedence.
+ type: object
+ properties:
+ gmsaCredentialSpec:
+ description: GMSACredentialSpec is
+ where the GMSA admission webhook
+ (https://github.com/kubernetes-sigs/windows-gmsa)
+ inlines the contents of the GMSA
+ credential spec named by the GMSACredentialSpecName
+ field. This field is alpha-level
+ and is only honored by servers that
+ enable the WindowsGMSA feature flag.
+ type: string
+ gmsaCredentialSpecName:
+ description: GMSACredentialSpecName
+ is the name of the GMSA credential
+ spec to use. This field is alpha-level
+ and is only honored by servers that
+ enable the WindowsGMSA feature flag.
+ type: string
+ runAsUserName:
+ description: The UserName in Windows
+ to run the entrypoint of the container
+ process. Defaults to the user specified
+ in image metadata if unspecified.
+ May also be set in PodSecurityContext.
+ If set in both SecurityContext and
+ PodSecurityContext, the value specified
+ in SecurityContext takes precedence.
+ This field is beta-level and may
+ be disabled with the WindowsRunAsUserName
+ feature flag.
+ type: string
+ serviceAccount:
+ description: 'DeprecatedServiceAccount is
+ a deprecated alias for ServiceAccountName.
+ Deprecated: Use serviceAccountName instead.'
+ type: string
+ serviceAccountName:
+ description: 'ServiceAccountName is the name
+ of the ServiceAccount to use to run this
+ pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/'
+ type: string
+ shareProcessNamespace:
+ description: 'Share a single process namespace
+ between all of the containers in a pod.
+ When this is set, containers will be able
+ to view and signal processes from other
+ containers in the same pod, and the first
+ process in each container will not be assigned
+ PID 1. HostPID and ShareProcessNamespace
+ cannot both be set. Optional: Defaults to
+ false.'
+ type: boolean
+ subdomain:
+ description: If specified, the fully qualified
+ Pod hostname will be "<hostname>.<subdomain>.<pod
+ namespace>.svc.<cluster domain>". If not
+ specified, the pod will not have a domainname
+ at all.
+ type: string
+ terminationGracePeriodSeconds:
+ description: Optional duration in seconds
+ the pod needs to terminate gracefully. May
+ be decreased in delete request. Value must
+ be a non-negative integer. The value zero
+ indicates delete immediately. If this value
+ is nil, the default grace period will be
+ used instead. The grace period is the duration
+ in seconds after the processes running in
+ the pod are sent a termination signal and
+ the time when the processes are forcibly
+ halted with a kill signal. Set this value
+ longer than the expected cleanup time for
+ your process. Defaults to 30 seconds.
+ type: integer
+ format: int64
+ tolerations:
+ description: If specified, the pod's tolerations.
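+ # Illustrative comment, not emitted by controller-gen: a CSV deployment
+ # pod template matching this field might carry, for example,
+ #   tolerations:
+ #   - key: node-role.kubernetes.io/master
+ #     operator: Exists
+ #     effect: NoSchedule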
+ type: array
+ items:
+ description: The pod this Toleration is
+ attached to tolerates any taint that matches
+ the triple <key,value,effect> using the
+ matching operator <operator>.
+ type: object
+ properties:
+ effect:
+ description: Effect indicates the taint
+ effect to match. Empty means match
+ all taint effects. When specified,
+ allowed values are NoSchedule, PreferNoSchedule
+ and NoExecute.
+ type: string
+ key:
+ description: Key is the taint key that
+ the toleration applies to. Empty means
+ match all taint keys. If the key is
+ empty, operator must be Exists; this
+ combination means to match all values
+ and all keys.
+ type: string
+ operator:
+ description: Operator represents a key's
+ relationship to the value. Valid operators
+ are Exists and Equal. Defaults to
+ Equal. Exists is equivalent to wildcard
+ for value, so that a pod can tolerate
+ all taints of a particular category.
+ type: string
+ tolerationSeconds:
+ description: TolerationSeconds represents
+ the period of time the toleration
+ (which must be of effect NoExecute,
+ otherwise this field is ignored) tolerates
+ the taint. By default, it is not set,
+ which means tolerate the taint forever
+ (do not evict). Zero and negative
+ values will be treated as 0 (evict
+ immediately) by the system.
+ type: integer
+ format: int64
+ value:
+ description: Value is the taint value
+ the toleration matches to. If the
+ operator is Exists, the value should
+ be empty, otherwise just a regular
+ string.
+ type: string
+ topologySpreadConstraints:
+ description: TopologySpreadConstraints describes
+ how a group of pods ought to spread across
+ topology domains. Scheduler will schedule
+ pods in a way which abides by the constraints.
+ This field is alpha-level and is only honored
+ by clusters that enable the EvenPodsSpread
+ feature. All topologySpreadConstraints are
+ ANDed.
+ type: array
+ items:
+ description: TopologySpreadConstraint specifies
+ how to spread matching pods among the
+ given topology.
+ type: object
+ required:
+ - maxSkew
+ - topologyKey
+ - whenUnsatisfiable
+ properties:
+ labelSelector:
+ description: LabelSelector is used to
+ find matching pods. Pods that match
+ this label selector are counted to
+ determine the number of pods in their
+ corresponding topology domain.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is
+ a list of label selector requirements.
+ The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector
+ requirement is a selector that
+ contains values, a key, and
+ an operator that relates the
+ key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label
+ key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents
+ a key's relationship to
+ a set of values. Valid operators
+ are In, NotIn, Exists and
+ DoesNotExist.
+ type: string
+ values:
+ description: values is an
+ array of string values.
+ If the operator is In or
+ NotIn, the values array
+ must be non-empty. If the
+ operator is Exists or DoesNotExist,
+ the values array must be
+ empty. This array is replaced
+ during a strategic merge
+ patch.
+ type: array
+ items:
+ type: string
+ matchLabels:
+ description: matchLabels is a map
+ of {key,value} pairs. A single
+ {key,value} in the matchLabels
+ map is equivalent to an element
+ of matchExpressions, whose key
+ field is "key", the operator is
+ "In", and the values array contains
+ only "value". The requirements
+ are ANDed.
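+ # Comment for illustration: matchLabels such as {app: my-operator} is
+ # shorthand for the matchExpressions entry
+ #   {key: app, operator: In, values: [my-operator]}
+ # ("my-operator" is a hypothetical label value).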
+ type: object
+ additionalProperties:
+ type: string
+ maxSkew:
+ description: 'MaxSkew describes the
+ degree to which pods may be unevenly
+ distributed. It''s the maximum permitted
+ difference between the number of matching
+ pods in any two topology domains of
+ a given topology type. For example,
+ in a 3-zone cluster, MaxSkew is set
+ to 1, and pods with the same labelSelector
+ spread as 1/1/0: | zone1 | zone2 |
+ zone3 | | P | P | |
+ - if MaxSkew is 1, incoming pod can
+ only be scheduled to zone3 to become
+ 1/1/1; scheduling it onto zone1(zone2)
+ would make the ActualSkew(2-0) on
+ zone1(zone2) violate MaxSkew(1). -
+ if MaxSkew is 2, incoming pod can
+ be scheduled onto any zone. It''s
+ a required field. Default value is
+ 1 and 0 is not allowed.'
+ type: integer
+ format: int32
+ topologyKey:
+ description: TopologyKey is the key
+ of node labels. Nodes that have a
+ label with this key and identical
+ values are considered to be in the
+ same topology. We consider each <key,
+ value> as a "bucket", and try to put
+ balanced number of pods into each
+ bucket. It's a required field.
+ type: string
+ whenUnsatisfiable:
+ description: 'WhenUnsatisfiable indicates
+ how to deal with a pod if it doesn''t
+ satisfy the spread constraint. - DoNotSchedule
+ (default) tells the scheduler not
+ to schedule it - ScheduleAnyway tells
+ the scheduler to still schedule it
+ It''s considered as "Unsatisfiable"
+ if and only if placing incoming pod
+ on any topology violates "MaxSkew".
+ For example, in a 3-zone cluster,
+ MaxSkew is set to 1, and pods with
+ the same labelSelector spread as 3/1/1:
+ | zone1 | zone2 | zone3 | | P P P
+ | P | P | If WhenUnsatisfiable
+ is set to DoNotSchedule, incoming
+ pod can only be scheduled to zone2(zone3)
+ to become 3/2/1(3/1/2) as ActualSkew(2-1)
+ on zone2(zone3) satisfies MaxSkew(1).
+ In other words, the cluster can still
+ be imbalanced, but scheduler won''t
+ make it *more* imbalanced. It''s a
+ required field.'
+ type: string
+ x-kubernetes-list-map-keys:
+ - topologyKey
+ - whenUnsatisfiable
+ x-kubernetes-list-type: map
+ volumes:
+ description: 'List of volumes that can be
+ mounted by containers belonging to the pod.
+ More info: https://kubernetes.io/docs/concepts/storage/volumes'
+ type: array
+ items:
+ description: Volume represents a named volume
+ in a pod that may be accessed by any container
+ in the pod.
+ type: object
+ required:
+ - name
+ properties:
+ awsElasticBlockStore:
+ description: 'AWSElasticBlockStore represents
+ an AWS Disk resource that is attached
+ to a kubelet''s host machine and then
+ exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore'
+ type: object
+ required:
+ - volumeID
+ properties:
+ fsType:
+ description: 'Filesystem type of
+ the volume that you want to mount.
+ Tip: Ensure that the filesystem
+ type is supported by the host
+ operating system. Examples: "ext4",
+ "xfs", "ntfs". Implicitly inferred
+ to be "ext4" if unspecified. More
+ info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+ TODO: how do we prevent errors
+ in the filesystem from compromising
+ the machine'
+ type: string
+ partition:
+ description: 'The partition in the
+ volume that you want to mount.
+ If omitted, the default is to
+ mount by volume name. Examples:
+ For volume /dev/sda1, you specify
+ the partition as "1". Similarly,
+ the volume partition for /dev/sda
+ is "0" (or you can leave the property
+ empty).'
+ type: integer + format: int32 + readOnly: + description: 'Specify "true" to + force and set the ReadOnly property + in VolumeMounts to "true". If + omitted, the default is "false". + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent + disk resource in AWS (Amazon EBS + volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + azureDisk: + description: AzureDisk represents an + Azure Data Disk mount on the host + and bind mount to the pod. + type: object + required: + - diskName + - diskURI + properties: + cachingMode: + description: 'Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: The Name of the data + disk in the blob storage + type: string + diskURI: + description: The URI the data disk + in the blob storage + type: string + fsType: + description: Filesystem type to + mount. Must be a filesystem type + supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" + if unspecified. + type: string + kind: + description: 'Expected values Shared: + multiple blob disks per storage + account Dedicated: single blob + disk per storage account Managed: + azure managed data disk (only + in managed availability set). + defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). + ReadOnly here will force the ReadOnly + setting in VolumeMounts. + type: boolean + azureFile: + description: AzureFile represents an + Azure File Service mount on the host + and bind mount to the pod. + type: object + required: + - secretName + - shareName + properties: + readOnly: + description: Defaults to false (read/write). + ReadOnly here will force the ReadOnly + setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret + that contains Azure Storage Account + Name and Key + type: string + shareName: + description: Share Name + type: string + cephfs: + description: CephFS represents a Ceph + FS mount on the host that shares a + pod's lifetime + type: object + required: + - monitors + properties: + monitors: + description: 'Required: Monitors + is a collection of Ceph monitors + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: array + items: + type: string + path: + description: 'Optional: Used as + the mounted root, rather than + the full Ceph tree, default is + /' + type: string + readOnly: + description: 'Optional: Defaults + to false (read/write). ReadOnly + here will force the ReadOnly setting + in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile + is the path to key ring for User, + default is /etc/ceph/user.secret + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef + is reference to the authentication + secret for User, default is empty. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: object + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' 
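+ # Comment only: this referent is a core/v1 Secret resolved in the pod's
+ # own namespace, e.g. name: ceph-secret (hypothetical).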
+ type: string + user: + description: 'Optional: User is + the rados user name, default is + admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + cinder: + description: 'Cinder represents a cinder + volume attached and mounted on kubelets + host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: object + required: + - volumeID + properties: + fsType: + description: 'Filesystem type to + mount. Must be a filesystem type + supported by the host operating + system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to + be "ext4" if unspecified. More + info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults + to false (read/write). ReadOnly + here will force the ReadOnly setting + in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to + a secret object containing parameters + used to connect to OpenStack.' + type: object + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + volumeID: + description: 'volume id used to + identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + configMap: + description: ConfigMap represents a + configMap that should populate this + volume + type: object + properties: + defaultMode: + description: 'Optional: mode bits + to use on created files by default. + Must be a value between 0 and + 0777. Defaults to 0644. Directories + within the path are not affected + by this setting. This might be + in conflict with other options + that affect the file mode, like + fsGroup, and the result can be + other mode bits set.' + type: integer + format: int32 + items: + description: If unspecified, each + key-value pair in the Data field + of the referenced ConfigMap will + be projected into the volume as + a file whose name is the key and + content is the value. If specified, + the listed keys will be projected + into the specified paths, and + unlisted keys will not be present. + If a key is specified which is + not present in the ConfigMap, + the volume setup will error unless + it is marked optional. Paths must + be relative and may not contain + the '..' path or start with '..'. + type: array + items: + description: Maps a string key + to a path within a volume. + type: object + required: + - key + - path + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode + bits to use on this file, + must be a value between + 0 and 0777. If not specified, + the volume defaultMode will + be used. This might be in + conflict with other options + that affect the file mode, + like fsGroup, and the result + can be other mode bits set.' + type: integer + format: int32 + path: + description: The relative + path of the file to map + the key to. May not be an + absolute path. May not contain + the path element '..'. May + not start with the string + '..'. + type: string + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' 
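+ # Comment only: the name of the ConfigMap to mount, e.g.
+ # name: my-operator-config (hypothetical), resolved in the pod's namespace.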
+ type: string + optional: + description: Specify whether the + ConfigMap or its keys must be + defined + type: boolean + csi: + description: CSI (Container Storage + Interface) represents storage that + is handled by an external CSI driver + (Alpha feature). + type: object + required: + - driver + properties: + driver: + description: Driver is the name + of the CSI driver that handles + this volume. Consult with your + admin for the correct name as + registered in the cluster. + type: string + fsType: + description: Filesystem type to + mount. Ex. "ext4", "xfs", "ntfs". + If not provided, the empty value + is passed to the associated CSI + driver which will determine the + default filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef + is a reference to the secret object + containing sensitive information + to pass to the CSI driver to complete + the CSI NodePublishVolume and + NodeUnpublishVolume calls. This + field is optional, and may be + empty if no secret is required. + If the secret object contains + more than one secret, all secret + references are passed. + type: object + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + readOnly: + description: Specifies a read-only + configuration for the volume. + Defaults to false (read/write). + type: boolean + volumeAttributes: + description: VolumeAttributes stores + driver-specific properties that + are passed to the CSI driver. + Consult your driver's documentation + for supported values. + type: object + additionalProperties: + type: string + downwardAPI: + description: DownwardAPI represents + downward API about the pod that should + populate this volume + type: object + properties: + defaultMode: + description: 'Optional: mode bits + to use on created files by default. + Must be a value between 0 and + 0777. Defaults to 0644. Directories + within the path are not affected + by this setting. This might be + in conflict with other options + that affect the file mode, like + fsGroup, and the result can be + other mode bits set.' + type: integer + format: int32 + items: + description: Items is a list of + downward API volume file + type: array + items: + description: DownwardAPIVolumeFile + represents information to create + the file containing the pod + field + type: object + required: + - path + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only + annotations, labels, name + and namespace are supported.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of + the schema the FieldPath + is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the + field to select in the + specified API version. + type: string + mode: + description: 'Optional: mode + bits to use on this file, + must be a value between + 0 and 0777. If not specified, + the volume defaultMode will + be used. This might be in + conflict with other options + that affect the file mode, + like fsGroup, and the result + can be other mode bits set.' + type: integer + format: int32 + path: + description: 'Required: Path + is the relative path name + of the file to be created. + Must not be absolute or + contain the ''..'' path. + Must be utf-8 encoded. 
The + first item of the relative + path must not start with + ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource + of the container: only resources + limits and requests (limits.cpu, + limits.memory, requests.cpu + and requests.memory) are + currently supported.' + type: object + required: + - resource + properties: + containerName: + description: 'Container + name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies + the output format of + the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to select' + type: string + emptyDir: + description: 'EmptyDir represents a + temporary directory that shares a + pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: object + properties: + medium: + description: 'What type of storage + medium should back this directory. + The default is "" which means + to use the node''s default medium. + Must be an empty string (default) + or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + description: 'Total amount of local + storage required for this EmptyDir + volume. The size limit is also + applicable for memory medium. + The maximum usage on memory medium + EmptyDir would be the minimum + value between the SizeLimit specified + here and the sum of memory limits + of all containers in a pod. The + default is nil which means that + the limit is undefined. More info: + http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + fc: + description: FC represents a Fibre Channel + resource that is attached to a kubelet's + host machine and then exposed to the + pod. + type: object + properties: + fsType: + description: 'Filesystem type to + mount. Must be a filesystem type + supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" + if unspecified. TODO: how do we + prevent errors in the filesystem + from compromising the machine' + type: string + lun: + description: 'Optional: FC target + lun number' + type: integer + format: int32 + readOnly: + description: 'Optional: Defaults + to false (read/write). ReadOnly + here will force the ReadOnly setting + in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target + worldwide names (WWNs)' + type: array + items: + type: string + wwids: + description: 'Optional: FC volume + world wide identifiers (wwids) + Either wwids or combination of + targetWWNs and lun must be set, + but not both simultaneously.' + type: array + items: + type: string + flexVolume: + description: FlexVolume represents a + generic volume resource that is provisioned/attached + using an exec based plugin. + type: object + required: + - driver + properties: + driver: + description: Driver is the name + of the driver to use for this + volume. + type: string + fsType: + description: Filesystem type to + mount. Must be a filesystem type + supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". + The default filesystem depends + on FlexVolume script. 
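+ # Comment only: a plain filesystem string such as fsType: "xfs"; when
+ # omitted, the vendor's FlexVolume driver script chooses the default.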
+ type: string + options: + description: 'Optional: Extra command + options if any.' + type: object + additionalProperties: + type: string + readOnly: + description: 'Optional: Defaults + to false (read/write). ReadOnly + here will force the ReadOnly setting + in VolumeMounts.' + type: boolean + secretRef: + description: 'Optional: SecretRef + is reference to the secret object + containing sensitive information + to pass to the plugin scripts. + This may be empty if no secret + object is specified. If the secret + object contains more than one + secret, all secrets are passed + to the plugin scripts.' + type: object + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + flocker: + description: Flocker represents a Flocker + volume attached to a kubelet's host + machine. This depends on the Flocker + control service being running + type: object + properties: + datasetName: + description: Name of the dataset + stored as metadata -> name on + the dataset for Flocker should + be considered as deprecated + type: string + datasetUUID: + description: UUID of the dataset. + This is unique identifier of a + Flocker dataset + type: string + gcePersistentDisk: + description: 'GCEPersistentDisk represents + a GCE Disk resource that is attached + to a kubelet''s host machine and then + exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: object + required: + - pdName + properties: + fsType: + description: 'Filesystem type of + the volume that you want to mount. + Tip: Ensure that the filesystem + type is supported by the host + operating system. Examples: "ext4", + "xfs", "ntfs". Implicitly inferred + to be "ext4" if unspecified. More + info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors + in the filesystem from compromising + the machine' + type: string + partition: + description: 'The partition in the + volume that you want to mount. + If omitted, the default is to + mount by volume name. Examples: + For volume /dev/sda1, you specify + the partition as "1". Similarly, + the volume partition for /dev/sda + is "0" (or you can leave the property + empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: integer + format: int32 + pdName: + description: 'Unique name of the + PD resource in GCE. Used to identify + the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will + force the ReadOnly setting in + VolumeMounts. Defaults to false. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + gitRepo: + description: 'GitRepo represents a git + repository at a particular revision. + DEPRECATED: GitRepo is deprecated. + To provision a container with a git + repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then + mount the EmptyDir into the Pod''s + container.' + type: object + required: + - repository + properties: + directory: + description: Target directory name. + Must not contain or start with + '..'. If '.' is supplied, the + volume directory will be the git + repository. Otherwise, if specified, + the volume will contain the git + repository in the subdirectory + with the given name. 
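+ # Comment only: per the deprecation note above, the equivalent modern
+ # setup is an emptyDir volume populated by an initContainer running
+ # `git clone <repository> <directory>`, then mounted by the main container.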
+ type: string
+ repository:
+ description: Repository URL
+ type: string
+ revision:
+ description: Commit hash for the
+ specified revision.
+ type: string
+ glusterfs:
+ description: 'Glusterfs represents a
+ Glusterfs mount on the host that shares
+ a pod''s lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md'
+ type: object
+ required:
+ - endpoints
+ - path
+ properties:
+ endpoints:
+ description: 'EndpointsName is the
+ endpoint name that details Glusterfs
+ topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
+ type: string
+ path:
+ description: 'Path is the Glusterfs
+ volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
+ type: string
+ readOnly:
+ description: 'ReadOnly here will
+ force the Glusterfs volume to
+ be mounted with read-only permissions.
+ Defaults to false. More info:
+ https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod'
+ type: boolean
+ hostPath:
+ description: 'HostPath represents a
+ pre-existing file or directory on
+ the host machine that is directly
+ exposed to the container. This is
+ generally used for system agents or
+ other privileged things that are allowed
+ to see the host machine. Most containers
+ will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+ --- TODO(jonesdl) We need to restrict
+ who can use host directory mounts
+ and who can/can not mount host directories
+ as read/write.'
+ type: object
+ required:
+ - path
+ properties:
+ path:
+ description: 'Path of the directory
+ on the host. If the path is a
+ symlink, it will follow the link
+ to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
+ type: string
+ type:
+ description: 'Type for HostPath
+ Volume Defaults to "" More info:
+ https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
+ type: string
+ iscsi:
+ description: 'ISCSI represents an ISCSI
+ Disk resource that is attached to
+ a kubelet''s host machine and then
+ exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md'
+ type: object
+ required:
+ - iqn
+ - lun
+ - targetPortal
+ properties:
+ chapAuthDiscovery:
+ description: whether support iSCSI
+ Discovery CHAP authentication
+ type: boolean
+ chapAuthSession:
+ description: whether support iSCSI
+ Session CHAP authentication
+ type: boolean
+ fsType:
+ description: 'Filesystem type of
+ the volume that you want to mount.
+ Tip: Ensure that the filesystem
+ type is supported by the host
+ operating system. Examples: "ext4",
+ "xfs", "ntfs". Implicitly inferred
+ to be "ext4" if unspecified. More
+ info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ TODO: how do we prevent errors
+ in the filesystem from compromising
+ the machine'
+ type: string
+ initiatorName:
+ description: Custom iSCSI Initiator
+ Name. If initiatorName is specified
+ with iscsiInterface simultaneously,
+ new iSCSI interface <target portal>:<volume
+ name> will be created for the
+ connection.
+ type: string
+ iqn:
+ description: Target iSCSI Qualified
+ Name.
+ type: string
+ iscsiInterface:
+ description: iSCSI Interface Name
+ that uses an iSCSI transport.
+ Defaults to 'default' (tcp).
+ type: string
+ lun:
+ description: iSCSI Target Lun number.
+ type: integer
+ format: int32
+ portals:
+ description: iSCSI Target Portal
+ List. The portal is either an
+ IP or ip_addr:port if the port
+ is other than default (typically
+ TCP ports 860 and 3260).
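+ # Comment only: portal entries are plain "IP" or "IP:port" strings, e.g.
+ #   portals: ["10.0.2.15:3260", "10.0.2.16:3260"]  (hypothetical addresses)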
+ type: array + items: + type: string + readOnly: + description: ReadOnly here will + force the ReadOnly setting in + VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI + target and initiator authentication + type: object + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + targetPortal: + description: iSCSI Target Portal. + The Portal is either an IP or + ip_addr:port if the port is other + than default (typically TCP ports + 860 and 3260). + type: string + name: + description: 'Volume''s name. Must be + a DNS_LABEL and unique within the + pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS + mount on the host that shares a pod''s + lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: object + required: + - path + - server + properties: + path: + description: 'Path that is exported + by the NFS server. More info: + https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will + force the NFS export to be mounted + with read-only permissions. Defaults + to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname + or IP address of the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource + represents a reference to a PersistentVolumeClaim + in the same namespace. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: object + required: + - claimName + properties: + claimName: + description: 'ClaimName is the name + of a PersistentVolumeClaim in + the same namespace as the pod + using this volume. More info: + https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly + setting in VolumeMounts. Default + false. + type: boolean + photonPersistentDisk: + description: PhotonPersistentDisk represents + a PhotonController persistent disk + attached and mounted on kubelets host + machine + type: object + required: + - pdID + properties: + fsType: + description: Filesystem type to + mount. Must be a filesystem type + supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" + if unspecified. + type: string + pdID: + description: ID that identifies + Photon Controller persistent disk + type: string + portworxVolume: + description: PortworxVolume represents + a portworx volume attached and mounted + on kubelets host machine + type: object + required: + - volumeID + properties: + fsType: + description: FSType represents the + filesystem type to mount Must + be a filesystem type supported + by the host operating system. + Ex. "ext4", "xfs". Implicitly + inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). + ReadOnly here will force the ReadOnly + setting in VolumeMounts. 
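+ # Comment only: since volumeID is the sole required key, a minimal
+ # portworxVolume looks like {volumeID: pxd-vol-1, readOnly: false}
+ # (volumeID value is hypothetical).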
+ type: boolean + volumeID: + description: VolumeID uniquely identifies + a Portworx volume + type: string + projected: + description: Items for all in one resources + secrets, configmaps, and downward + API + type: object + required: + - sources + properties: + defaultMode: + description: Mode bits to use on + created files by default. Must + be a value between 0 and 0777. + Directories within the path are + not affected by this setting. + This might be in conflict with + other options that affect the + file mode, like fsGroup, and the + result can be other mode bits + set. + type: integer + format: int32 + sources: + description: list of volume projections + type: array + items: + description: Projection that may + be projected along with other + supported volume types + type: object + properties: + configMap: + description: information about + the configMap data to project + type: object + properties: + items: + description: If unspecified, + each key-value pair + in the Data field of + the referenced ConfigMap + will be projected into + the volume as a file + whose name is the key + and content is the value. + If specified, the listed + keys will be projected + into the specified paths, + and unlisted keys will + not be present. If a + key is specified which + is not present in the + ConfigMap, the volume + setup will error unless + it is marked optional. + Paths must be relative + and may not contain + the '..' path or start + with '..'. + type: array + items: + description: Maps a + string key to a path + within a volume. + type: object + required: + - key + - path + properties: + key: + description: The + key to project. + type: string + mode: + description: 'Optional: + mode bits to use + on this file, + must be a value + between 0 and + 0777. If not specified, + the volume defaultMode + will be used. + This might be + in conflict with + other options + that affect the + file mode, like + fsGroup, and the + result can be + other mode bits + set.' + type: integer + format: int32 + path: + description: The + relative path + of the file to + map the key to. + May not be an + absolute path. + May not contain + the path element + '..'. May not + start with the + string '..'. + type: string + name: + description: 'Name of + the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful + fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether + the ConfigMap or its + keys must be defined + type: boolean + downwardAPI: + description: information about + the downwardAPI data to + project + type: object + properties: + items: + description: Items is + a list of DownwardAPIVolume + file + type: array + items: + description: DownwardAPIVolumeFile + represents information + to create the file + containing the pod + field + type: object + required: + - path + properties: + fieldRef: + description: 'Required: + Selects a field + of the pod: only + annotations, labels, + name and namespace + are supported.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version + of the schema + the FieldPath + is written + in terms of, + defaults to + "v1". + type: string + fieldPath: + description: Path + of the field + to select + in the specified + API version. + type: string + mode: + description: 'Optional: + mode bits to use + on this file, + must be a value + between 0 and + 0777. If not specified, + the volume defaultMode + will be used. 
+ This might be + in conflict with + other options + that affect the + file mode, like + fsGroup, and the + result can be + other mode bits + set.' + type: integer + format: int32 + path: + description: 'Required: + Path is the relative + path name of the + file to be created. + Must not be absolute + or contain the + ''..'' path. Must + be utf-8 encoded. + The first item + of the relative + path must not + start with ''..''' + type: string + resourceFieldRef: + description: 'Selects + a resource of + the container: + only resources + limits and requests + (limits.cpu, limits.memory, + requests.cpu and + requests.memory) + are currently + supported.' + type: object + required: + - resource + properties: + containerName: + description: 'Container + name: required + for volumes, + optional for + env vars' + type: string + divisor: + description: Specifies + the output + format of + the exposed + resources, + defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: + resource to + select' + type: string + secret: + description: information about + the secret data to project + type: object + properties: + items: + description: If unspecified, + each key-value pair + in the Data field of + the referenced Secret + will be projected into + the volume as a file + whose name is the key + and content is the value. + If specified, the listed + keys will be projected + into the specified paths, + and unlisted keys will + not be present. If a + key is specified which + is not present in the + Secret, the volume setup + will error unless it + is marked optional. + Paths must be relative + and may not contain + the '..' path or start + with '..'. + type: array + items: + description: Maps a + string key to a path + within a volume. + type: object + required: + - key + - path + properties: + key: + description: The + key to project. + type: string + mode: + description: 'Optional: + mode bits to use + on this file, + must be a value + between 0 and + 0777. If not specified, + the volume defaultMode + will be used. + This might be + in conflict with + other options + that affect the + file mode, like + fsGroup, and the + result can be + other mode bits + set.' + type: integer + format: int32 + path: + description: The + relative path + of the file to + map the key to. + May not be an + absolute path. + May not contain + the path element + '..'. May not + start with the + string '..'. + type: string + name: + description: 'Name of + the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful + fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether + the Secret or its key + must be defined + type: boolean + serviceAccountToken: + description: information about + the serviceAccountToken + data to project + type: object + required: + - path + properties: + audience: + description: Audience + is the intended audience + of the token. A recipient + of a token must identify + itself with an identifier + specified in the audience + of the token, and otherwise + should reject the token. + The audience defaults + to the identifier of + the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds + is the requested duration + of validity of the service + account token. 
As the
+ token approaches expiration,
+ the kubelet volume plugin
+ will proactively rotate
+ the service account
+ token. The kubelet will
+ start trying to rotate
+ the token if the token
+ is older than 80 percent
+ of its time to live
+ or if the token is older
+ than 24 hours. Defaults
+ to 1 hour and must be
+ at least 10 minutes.
+ type: integer
+ format: int64
+ path:
+ description: Path is the
+ path relative to the
+ mount point of the file
+ to project the token
+ into.
+ type: string
+ quobyte:
+ description: Quobyte represents a Quobyte
+ mount on the host that shares a pod's
+ lifetime
+ type: object
+ required:
+ - registry
+ - volume
+ properties:
+ group:
+ description: Group to map volume
+ access to. Default is no group
+ type: string
+ readOnly:
+ description: ReadOnly here will
+ force the Quobyte volume to be
+ mounted with read-only permissions.
+ Defaults to false.
+ type: boolean
+ registry:
+ description: Registry represents
+ a single or multiple Quobyte Registry
+ services specified as a string
+ as host:port pair (multiple entries
+ are separated with commas) which
+ acts as the central registry for
+ volumes
+ type: string
+ tenant:
+ description: Tenant owning the given
+ Quobyte volume in the Backend.
+ Used with dynamically provisioned
+ Quobyte volumes, value is set
+ by the plugin
+ type: string
+ user:
+ description: User to map volume
+ access to. Defaults to service
+ account user
+ type: string
+ volume:
+ description: Volume is a string
+ that references an already created
+ Quobyte volume by name.
+ type: string
+ rbd:
+ description: 'RBD represents a Rados
+ Block Device mount on the host that
+ shares a pod''s lifetime. More info:
+ https://examples.k8s.io/volumes/rbd/README.md'
+ type: object
+ required:
+ - image
+ - monitors
+ properties:
+ fsType:
+ description: 'Filesystem type of
+ the volume that you want to mount.
+ Tip: Ensure that the filesystem
+ type is supported by the host
+ operating system. Examples: "ext4",
+ "xfs", "ntfs". Implicitly inferred
+ to be "ext4" if unspecified. More
+ info: https://kubernetes.io/docs/concepts/storage/volumes#rbd
+ TODO: how do we prevent errors
+ in the filesystem from compromising
+ the machine'
+ type: string
+ image:
+ description: 'The rados image name.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
+ type: string
+ keyring:
+ description: 'Keyring is the path
+ to key ring for RBDUser. Default
+ is /etc/ceph/keyring. More info:
+ https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
+ type: string
+ monitors:
+ description: 'A collection of Ceph
+ monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
+ type: array
+ items:
+ type: string
+ pool:
+ description: 'The rados pool name.
+ Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
+ type: string
+ readOnly:
+ description: 'ReadOnly here will
+ force the ReadOnly setting in
+ VolumeMounts. Defaults to false.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
+ type: boolean
+ secretRef:
+ description: 'SecretRef is name
+ of the authentication secret for
+ RBDUser. If provided overrides
+ keyring. Default is nil. More
+ info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it'
+ type: object
+ properties:
+ name:
+ description: 'Name of the referent.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ TODO: Add other useful fields.
+ apiVersion, kind, uid?'
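+ # Comment only: e.g. name: ceph-secret (hypothetical); when set, this
+ # Secret takes precedence over the keyring path above.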
+ type: string + user: + description: 'The rados user name. + Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + scaleIO: + description: ScaleIO represents a ScaleIO + persistent volume attached and mounted + on Kubernetes nodes. + type: object + required: + - gateway + - secretRef + - system + properties: + fsType: + description: Filesystem type to + mount. Must be a filesystem type + supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: The host address of + the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO + Protection Domain for the configured + storage. + type: string + readOnly: + description: Defaults to false (read/write). + ReadOnly here will force the ReadOnly + setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references + to the secret for ScaleIO user + and other sensitive information. + If this is not provided, Login + operation will fail. + type: object + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + sslEnabled: + description: Flag to enable/disable + SSL communication with Gateway, + default false + type: boolean + storageMode: + description: Indicates whether the + storage for a volume should be + ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage + Pool associated with the protection + domain. + type: string + system: + description: The name of the storage + system as configured in ScaleIO. + type: string + volumeName: + description: The name of a volume + already created in the ScaleIO + system that is associated with + this volume source. + type: string + secret: + description: 'Secret represents a secret + that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: object + properties: + defaultMode: + description: 'Optional: mode bits + to use on created files by default. + Must be a value between 0 and + 0777. Defaults to 0644. Directories + within the path are not affected + by this setting. This might be + in conflict with other options + that affect the file mode, like + fsGroup, and the result can be + other mode bits set.' + type: integer + format: int32 + items: + description: If unspecified, each + key-value pair in the Data field + of the referenced Secret will + be projected into the volume as + a file whose name is the key and + content is the value. If specified, + the listed keys will be projected + into the specified paths, and + unlisted keys will not be present. + If a key is specified which is + not present in the Secret, the + volume setup will error unless + it is marked optional. Paths must + be relative and may not contain + the '..' path or start with '..'. + type: array + items: + description: Maps a string key + to a path within a volume. + type: object + required: + - key + - path + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode + bits to use on this file, + must be a value between + 0 and 0777. If not specified, + the volume defaultMode will + be used. 
This might be in + conflict with other options + that affect the file mode, + like fsGroup, and the result + can be other mode bits set.' + type: integer + format: int32 + path: + description: The relative + path of the file to map + the key to. May not be an + absolute path. May not contain + the path element '..'. May + not start with the string + '..'. + type: string + optional: + description: Specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: 'Name of the secret + in the pod''s namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + storageos: + description: StorageOS represents a + StorageOS volume attached and mounted + on Kubernetes nodes. + type: object + properties: + fsType: + description: Filesystem type to + mount. Must be a filesystem type + supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" + if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). + ReadOnly here will force the ReadOnly + setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies + the secret to use for obtaining + the StorageOS API credentials. If + not specified, default values + will be attempted. + type: object + properties: + name: + description: 'Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. + apiVersion, kind, uid?' + type: string + volumeName: + description: VolumeName is the human-readable + name of the StorageOS volume. Volume + names are only unique within a + namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies + the scope of the volume within + StorageOS. If no namespace is + specified then the Pod's namespace + will be used. This allows the + Kubernetes name scoping to be + mirrored within StorageOS for + tighter integration. Set VolumeName + to any name to override the default + behaviour. Set to "default" if + you are not using namespaces within + StorageOS. Namespaces that do + not pre-exist within StorageOS + will be created. + type: string + vsphereVolume: + description: VsphereVolume represents + a vSphere volume attached and mounted + on kubelets host machine + type: object + required: + - volumePath + properties: + fsType: + description: Filesystem type to + mount. Must be a filesystem type + supported by the host operating + system. Ex. "ext4", "xfs", "ntfs". + Implicitly inferred to be "ext4" + if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based + Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based + Management (SPBM) profile name. + type: string + volumePath: + description: Path that identifies + vSphere volume vmdk + type: string + permissions: + type: array + items: + description: StrategyDeploymentPermissions describe the rbac + rules and service account needed by the install strategy + type: object + required: + - rules + - serviceAccountName + properties: + rules: + type: array + items: + description: PolicyRule holds information that describes + a policy rule, but does not contain information about + who the rule applies to or which namespace the rule + applies to. + type: object + required: + - verbs + properties: + apiGroups: + description: APIGroups is the name of the APIGroup + that contains the resources. 
If multiple API
+ groups are specified, any action requested against
+ one of the enumerated resources in any API group
+ will be allowed.
+ type: array
+ items:
+ type: string
+ nonResourceURLs:
+ description: NonResourceURLs is a set of partial
+ urls that a user should have access to. *s are
+ allowed, but only as the full, final step in the
+ path. Since non-resource URLs are not namespaced,
+ this field is only applicable for ClusterRoles
+ referenced from a ClusterRoleBinding. Rules can
+ either apply to API resources (such as "pods"
+ or "secrets") or non-resource URL paths (such
+ as "/api"), but not both.
+ type: array
+ items:
+ type: string
+ resourceNames:
+ description: ResourceNames is an optional white
+ list of names that the rule applies to. An empty
+ set means that everything is allowed.
+ type: array
+ items:
+ type: string
+ resources:
+ description: Resources is a list of resources this
+ rule applies to. ResourceAll represents all resources.
+ type: array
+ items:
+ type: string
+ verbs:
+ description: Verbs is a list of Verbs that apply
+ to ALL the ResourceKinds and AttributeRestrictions
+ contained in this rule. VerbAll represents all
+ kinds.
+ type: array
+ items:
+ type: string
+ serviceAccountName:
+ type: string
+ strategy:
+ type: string
+ installModes:
+ description: InstallModes specify supported installation types
+ type: array
+ items:
+ description: InstallMode associates an InstallModeType with a flag
+ representing if the CSV supports it
+ type: object
+ required:
+ - supported
+ - type
+ properties:
+ supported:
+ type: boolean
+ type:
+ description: InstallModeType is a supported type of install mode
+ for CSV installation
+ type: string
+ keywords:
+ type: array
+ items:
+ type: string
+ labels:
+ description: Map of string keys and values that can be used to organize
+ and categorize (scope and select) objects.
+ type: object
+ additionalProperties:
+ type: string
+ links:
+ type: array
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ url:
+ type: string
+ maintainers:
+ type: array
+ items:
+ type: object
+ properties:
+ email:
+ type: string
+ name:
+ type: string
+ maturity:
+ type: string
+ minKubeVersion:
+ type: string
+ nativeAPIs:
+ type: array
+ items:
+ description: GroupVersionKind unambiguously identifies a kind. It
+ doesn't anonymously include GroupVersion to avoid automatic coercion. It
+ doesn't use a GroupVersion to avoid custom marshalling
+ type: object
+ required:
+ - group
+ - kind
+ - version
+ properties:
+ group:
+ type: string
+ kind:
+ type: string
+ version:
+ type: string
+ provider:
+ type: object
+ properties:
+ name:
+ type: string
+ url:
+ type: string
+ replaces:
+ description: The name of a CSV this one replaces. Should match the `metadata.Name`
+ field of the old CSV.
+ type: string
+ selector:
+ description: Label selector for related resources.
+ type: object
+ properties:
+ matchExpressions:
+ description: matchExpressions is a list of label selector requirements.
+ The requirements are ANDed.
+ type: array
+ items:
+ description: A label selector requirement is a selector that contains
+ values, a key, and an operator that relates the key and values.
+ type: object
+ required:
+ - key
+ - operator
+ properties:
+ key:
+ description: key is the label key that the selector applies
+ to.
+ type: string
+ operator:
+ description: operator represents a key's relationship to a
+ set of values. Valid operators are In, NotIn, Exists and
+ DoesNotExist.
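+ # Comment for illustration: a requirement like
+ #   {key: app, operator: In, values: [my-operator]}
+ # selects related resources labeled app=my-operator (hypothetical label).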
+ type: string + values: + description: values is an array of string values. If the operator + is In or NotIn, the values array must be non-empty. If the + operator is Exists or DoesNotExist, the values array must + be empty. This array is replaced during a strategic merge + patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator is + "In", and the values array contains only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + version: + description: OperatorVersion is a wrapper around semver.Version which + supports correct marshaling to YAML and JSON. + type: string + status: + description: ClusterServiceVersionStatus represents information about the + status of a pod. Status may trail the actual state of a system. + type: object + properties: + certsLastUpdated: + description: Last time the owned APIService certs were updated + type: string + format: date-time + certsRotateAt: + description: Time the owned APIService certs will rotate next + type: string + format: date-time + conditions: + description: List of conditions, a history of state transitions + type: array + items: + description: Conditions appear in the status as a record of state + transitions on the ClusterServiceVersion + type: object + properties: + lastTransitionTime: + description: Last time the status transitioned from one status + to another. + type: string + format: date-time + lastUpdateTime: + description: Last time we updated the status + type: string + format: date-time + message: + description: A human readable message indicating details about + why the ClusterServiceVersion is in this condition. + type: string + phase: + description: Condition of the ClusterServiceVersion + type: string + reason: + description: A brief CamelCase message indicating details about + why the ClusterServiceVersion is in this state. e.g. 'RequirementsNotMet' + type: string + lastTransitionTime: + description: Last time the status transitioned from one status to another. + type: string + format: date-time + lastUpdateTime: + description: Last time we updated the status + type: string + format: date-time + message: + description: A human readable message indicating details about why the + ClusterServiceVersion is in this condition. + type: string + phase: + description: Current condition of the ClusterServiceVersion + type: string + reason: + description: A brief CamelCase message indicating details about why + the ClusterServiceVersion is in this state. e.g. 
'RequirementsNotMet' + type: string + requirementStatus: + description: The status of each requirement for this CSV + type: array + items: + type: object + required: + - group + - kind + - message + - name + - status + - version + properties: + dependents: + type: array + items: + description: DependentStatus is the status for a dependent requirement + (to prevent infinite nesting) + type: object + required: + - group + - kind + - status + - version + properties: + group: + type: string + kind: + type: string + message: + type: string + status: + description: StatusReason is a camelcased reason for the + status of a RequirementStatus or DependentStatus + type: string + uuid: + type: string + version: + type: string + group: + type: string + kind: + type: string + message: + type: string + name: + type: string + status: + description: StatusReason is a camelcased reason for the status + of a RequirementStatus or DependentStatus + type: string + uuid: + type: string + version: + type: string diff --git a/crds/operators.coreos.com_installplans.yaml b/crds/operators.coreos.com_installplans.yaml new file mode 100644 index 000000000..2e5256795 --- /dev/null +++ b/crds/operators.coreos.com_installplans.yaml @@ -0,0 +1,303 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: installplans.operators.coreos.com + annotations: + displayName: Install Plan + description: Represents a plan to install and resolve dependencies for Cluster + Services +spec: + group: operators.coreos.com + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true + scope: Namespaced + names: + plural: installplans + singular: installplan + kind: InstallPlan + listKind: InstallPlanList + shortNames: + - ip + categories: + - olm + additionalPrinterColumns: + - name: CSV + type: string + description: The first CSV in the list of clusterServiceVersionNames + JSONPath: .spec.clusterServiceVersionNames[0] + - name: Approval + type: string + description: The approval mode + JSONPath: .spec.approval + - name: Approved + type: boolean + JSONPath: .spec.approved + preserveUnknownFields: false + subresources: + # status enables the status subresource. + status: {} + validation: + openAPIV3Schema: + description: InstallPlan defines the installation of a set of operators. + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: InstallPlanSpec defines a set of Application resources to be + installed + type: object + required: + - approval + - approved + - clusterServiceVersionNames + - generation + properties: + approval: + description: Approval is the user approval policy for an InstallPlan. 
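+              # Illustrative example (hand-written, not generated): a minimal
+              # InstallPlan conforming to this spec. The schema does not
+              # enumerate Approval values; "Automatic" and "Manual" are the
+              # values OLM is known to use, and all names below are hypothetical:
+              #   apiVersion: operators.coreos.com/v1alpha1
+              #   kind: InstallPlan
+              #   metadata:
+              #     name: install-example
+              #     namespace: operators
+              #   spec:
+              #     approval: Manual
+              #     approved: false
+              #     clusterServiceVersionNames:
+              #     - example-operator.v0.1.0
+              #     generation: 1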
+ type: string + approved: + type: boolean + clusterServiceVersionNames: + type: array + items: + type: string + generation: + type: integer + source: + type: string + sourceNamespace: + type: string + status: + description: "InstallPlanStatus represents the information about the status + of steps required to complete installation. \n Status may trail the actual + state of a system." + type: object + required: + - catalogSources + - phase + properties: + attenuatedServiceAccountRef: + description: AttenuatedServiceAccountRef references the service account + that is used to do scoped operator install. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + bundleLookups: + description: BundleLookups is the set of in-progress requests to pull + and unpackage bundle content to the cluster. + type: array + items: + description: BundleLookup is a request to pull and unpackage the content + of a bundle to the cluster. + type: object + required: + - catalogSourceRef + - identifier + - path + - replaces + properties: + catalogSourceRef: + description: CatalogSourceRef is a reference to the CatalogSource + the bundle path was resolved from. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part + of an object. 
TODO: this design is not final and this field + is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + conditions: + description: Conditions represent the overall state of a BundleLookup. + type: array + items: + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: Last time the condition transitioned from one + status to another. + type: string + format: date-time + lastUpdateTime: + description: Last time the condition was probed. + type: string + format: date-time + message: + description: A human readable message indicating details + about the transition. + type: string + reason: + description: The reason for the condition's last transition. + type: string + status: + description: Status of the condition, one of True, False, + Unknown. + type: string + type: + description: Type of condition. + type: string + identifier: + description: Identifier is the catalog-unique name of the operator + (the name of the CSV for bundles that contain CSVs) + type: string + path: + description: Path refers to the location of a bundle to pull. + It's typically an image reference. + type: string + replaces: + description: Replaces is the name of the bundle to replace with + the one found at Path. + type: string + catalogSources: + type: array + items: + type: string + conditions: + type: array + items: + description: InstallPlanCondition represents the overall status of + the execution of an InstallPlan. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + lastUpdateTime: + type: string + format: date-time + message: + type: string + reason: + description: ConditionReason is a camelcased reason for the state + transition. + type: string + status: + type: string + type: + description: InstallPlanConditionType describes the state of an + InstallPlan at a certain point as a whole. + type: string + phase: + description: InstallPlanPhase is the current status of an InstallPlan + as a whole. + type: string + plan: + type: array + items: + description: Step represents the status of an individual step in an + InstallPlan. + type: object + required: + - resolving + - resource + - status + properties: + resolving: + type: string + resource: + description: StepResource represents the status of a resource + to be tracked by an InstallPlan.
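+                  # Illustrative example (hand-written, not generated): a
+                  # hypothetical status.plan entry pairing a Step's resolving
+                  # CSV with the StepResource defined below; "Created" is one
+                  # status value OLM is known to report, and all names are
+                  # assumptions:
+                  #   plan:
+                  #   - resolving: example-operator.v0.1.0
+                  #     status: Created
+                  #     resource:
+                  #       group: operators.coreos.com
+                  #       version: v1alpha1
+                  #       kind: ClusterServiceVersion
+                  #       name: example-operator.v0.1.0
+                  #       sourceName: example-catalog
+                  #       sourceNamespace: olm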
+ type: object + required: + - group + - kind + - name + - sourceName + - sourceNamespace + - version + properties: + group: + type: string + kind: + type: string + manifest: + type: string + name: + type: string + sourceName: + type: string + sourceNamespace: + type: string + version: + type: string + status: + description: StepStatus is the current status of a particular + resource in an InstallPlan + type: string diff --git a/crds/operators.coreos.com_operatorgroups.yaml b/crds/operators.coreos.com_operatorgroups.yaml new file mode 100644 index 000000000..a22bd3bfb --- /dev/null +++ b/crds/operators.coreos.com_operatorgroups.yaml @@ -0,0 +1,164 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: operatorgroups.operators.coreos.com +spec: + group: operators.coreos.com + version: v1 + versions: + - name: v1 + served: true + storage: true + "schema": + "openAPIV3Schema": + description: OperatorGroup is the unit of multitenancy for OLM-managed operators. + It constrains the installation of operators in its namespace to a specified + set of target namespaces. + type: object + required: + - metadata + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OperatorGroupSpec is the spec for an OperatorGroup resource. + type: object + properties: + selector: + description: Selector selects the OperatorGroup's target namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + serviceAccountName: + description: ServiceAccountName is the admin-specified service account + which will be used to deploy operator(s) in this operator group.
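+            # Illustrative example (hand-written, not generated): a minimal
+            # OperatorGroup targeting its own namespace; targetNamespaces
+            # (below) takes precedence over the selector above, and all names
+            # are hypothetical:
+            #   apiVersion: operators.coreos.com/v1
+            #   kind: OperatorGroup
+            #   metadata:
+            #     name: example-group
+            #     namespace: example-namespace
+            #   spec:
+            #     targetNamespaces:
+            #     - example-namespace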
+ type: string + staticProvidedAPIs: + description: Static tells OLM not to update the OperatorGroup's providedAPIs + annotation. + type: boolean + targetNamespaces: + description: TargetNamespaces is an explicit set of namespaces to + target. If it is set, Selector is ignored. + type: array + items: + type: string + status: + description: OperatorGroupStatus is the status for an OperatorGroup resource. + type: object + required: + - lastUpdated + properties: + lastUpdated: + description: LastUpdated is a timestamp of the last time the OperatorGroup's + status was updated. + type: string + format: date-time + namespaces: + description: Namespaces is the set of target namespaces for the OperatorGroup. + type: array + items: + type: string + serviceAccountRef: + description: ServiceAccountRef references the service account object + specified. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + - name: v1alpha2 + served: true + storage: false + names: + plural: operatorgroups + singular: operatorgroup + kind: OperatorGroup + listKind: OperatorGroupList + shortNames: + - og + categories: + - olm + scope: Namespaced + preserveUnknownFields: false + subresources: + # status enables the status subresource. + status: {} diff --git a/crds/operators.coreos.com_operators.yaml b/crds/operators.coreos.com_operators.yaml new file mode 100644 index 000000000..c9937c72d --- /dev/null +++ b/crds/operators.coreos.com_operators.yaml @@ -0,0 +1,178 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.2.8 + creationTimestamp: null + name: operators.operators.coreos.com +spec: + group: operators.coreos.com + names: + kind: Operator + listKind: OperatorList + plural: operators + singular: operator + scope: Cluster + validation: + openAPIV3Schema: + description: Operator represents a cluster operator.
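+      # Illustrative note (hand-written, not generated): Operator is
+      # cluster-scoped and its spec is intentionally empty; components are
+      # discovered via the status.components.labelSelector defined below. OLM
+      # conventionally labels component resources with an
+      # operators.coreos.com/<name> key, though this schema does not mandate
+      # that convention; the key below is a hypothetical example:
+      #   labelSelector:
+      #     matchExpressions:
+      #     - key: operators.coreos.com/example-operator
+      #       operator: Exists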
+ type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OperatorSpec defines the desired state of Operator + type: object + status: + description: OperatorStatus describes the observed state of an operator + and its components. + type: object + properties: + components: + description: Components describes resources that compose the operator. + type: object + required: + - labelSelector + properties: + labelSelector: + description: LabelSelector is a label query over a set of resources + used to select the operator's components + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + refs: + description: Refs are a set of references to the operator's component + resources, selected with LabelSelector. + type: array + items: + description: RichReference is a reference to a resource, enriched + with its status conditions. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + conditions: + description: Conditions represent the latest state of the + component. + type: array + items: + description: Condition represents the latest available observations + of a component's state. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: Last time the condition transitioned from + one status to another. + type: string + format: date-time + lastUpdateTime: + description: Last time the condition was probed + type: string + format: date-time + message: + description: A human readable message indicating details + about the transition. + type: string + reason: + description: The reason for the condition's last transition.
+ type: string + status: + description: Status of the condition, one of True, False, + Unknown. + type: string + type: + description: Type of condition. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part + of an object. TODO: this design is not final and this field + is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + version: v2alpha1 + versions: + - name: v2alpha1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/crds/operators.coreos.com_subscriptions.yaml b/crds/operators.coreos.com_subscriptions.yaml new file mode 100644 index 000000000..fb74b33a4 --- /dev/null +++ b/crds/operators.coreos.com_subscriptions.yaml @@ -0,0 +1,1806 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: subscriptions.operators.coreos.com + annotations: + displayName: Subscription + description: Subscribes service catalog to a source and channel to receive updates + for packages. +spec: + group: operators.coreos.com + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true + scope: Namespaced + names: + plural: subscriptions + singular: subscription + kind: Subscription + listKind: SubscriptionList + shortNames: + - sub + - subs + categories: + - olm + additionalPrinterColumns: + - name: Package + type: string + description: The package subscribed to + JSONPath: .spec.name + - name: Source + type: string + description: The catalog source for the specified package + JSONPath: .spec.source + - name: Channel + type: string + description: The channel of updates to subscribe to + JSONPath: .spec.channel + preserveUnknownFields: false + subresources: + # status enables the status subresource. + status: {} + validation: + openAPIV3Schema: + description: Subscription keeps operators up to date by tracking changes to + Catalogs. + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: SubscriptionSpec defines an Application that can be installed + type: object + required: + - name + - source + - sourceNamespace + properties: + channel: + type: string + config: + description: SubscriptionConfig contains configuration specified for + a subscription. + type: object + properties: + env: + description: Env is a list of environment variables to set in the + container. Cannot be updated. + type: array + items: + description: EnvVar represents an environment variable present + in a Container. + type: object + required: + - name + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + type: object + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + type: object + required: + - key + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP, status.podIPs.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' 
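+                  # Illustrative example (hand-written, not generated): a
+                  # minimal Subscription carrying the config.env stanza
+                  # described above; all names and values are hypothetical:
+                  #   apiVersion: operators.coreos.com/v1alpha1
+                  #   kind: Subscription
+                  #   metadata:
+                  #     name: example-operator
+                  #     namespace: operators
+                  #   spec:
+                  #     name: example-operator
+                  #     source: example-catalog
+                  #     sourceNamespace: olm
+                  #     channel: stable
+                  #     config:
+                  #       env:
+                  #       - name: HTTP_PROXY
+                  #         value: http://proxy.example.com:3128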
+ type: object + required: + - resource + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the exposed + resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + envFrom: + description: EnvFrom is a list of sources to populate environment + variables in the container. The keys defined within a source must + be a C_IDENTIFIER. All invalid keys will be reported as an event + when the container is starting. When a key exists in multiple + sources, the value associated with the last source will take precedence. + Values defined by an Env with a duplicate key will take precedence. + Immutable. + type: array + items: + description: EnvFromSource represents the source of a set of ConfigMaps + type: object + properties: + configMapRef: + description: The ConfigMap to select from + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap must be defined + type: boolean + prefix: + description: An optional identifier to prepend to each key + in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret must be defined + type: boolean + nodeSelector: + description: 'NodeSelector is a selector which must be true for + the pod to fit on a node. Selector which must match a node''s + labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/' + type: object + additionalProperties: + type: string + resources: + description: 'Resources represents compute resources required by + this container. Immutable. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + properties: + limits: + description: 'Limits describes the maximum amount of compute + resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + selector: + description: Selector is the label selector for pods to be configured. + Existing ReplicaSets whose pods are selected by this will be the + ones affected by this deployment. It must match the pod template's + labels. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + tolerations: + description: Tolerations are the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple <key,value,effect> using the + matching operator <operator>. + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to + Equal. Exists is equivalent to wildcard for value, so that + a pod can tolerate all taints of a particular category.
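+                # Illustrative example (hand-written, not generated): a
+                # hypothetical config.tolerations entry using the fields
+                # described here:
+                #   tolerations:
+                #   - key: node-role.kubernetes.io/infra
+                #     operator: Exists
+                #     effect: NoSchedule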
+ type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do + not evict). Zero and negative values will be treated as + 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. + type: string + volumeMounts: + description: List of VolumeMounts to set in the container. + type: array + items: + description: VolumeMount describes a mounting of a Volume within + a container. + type: object + required: + - mountPath + - name + properties: + mountPath: + description: Path within the container at which the volume + should be mounted. Must not contain ':'. + type: string + mountPropagation: + description: mountPropagation determines how mounts are propagated + from the host to container and the other way around. When + not set, MountPropagationNone is used. This field is beta + in 1.10. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: Mounted read-only if true, read-write otherwise + (false or unspecified). Defaults to false. + type: boolean + subPath: + description: Path within the volume from which the container's + volume should be mounted. Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which the + container's volume should be mounted. Behaves similarly + to SubPath but environment variable references $(VAR_NAME) + are expanded using the container's environment. Defaults + to "" (volume's root). SubPathExpr and SubPath are mutually + exclusive. + type: string + volumes: + description: List of Volumes to set in the podSpec. + type: array + items: + description: Volume represents a named volume in a pod that may + be accessed by any container in the pod. + type: object + required: + - name + properties: + awsElasticBlockStore: + description: 'AWSElasticBlockStore represents an AWS Disk + resource that is attached to a kubelet''s host machine and + then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: object + required: + - volumeID + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty).' + type: integer + format: int32 + readOnly: + description: 'Specify "true" to force and set the ReadOnly + property in VolumeMounts to "true". If omitted, the + default is "false". 
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: boolean + volumeID: + description: 'Unique ID of the persistent disk resource + in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore' + type: string + azureDisk: + description: AzureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + type: object + required: + - diskName + - diskURI + properties: + cachingMode: + description: 'Host Caching mode: None, Read Only, Read + Write.' + type: string + diskName: + description: The Name of the data disk in the blob storage + type: string + diskURI: + description: The URI the data disk in the blob storage + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + kind: + description: 'Expected values Shared: multiple blob disks + per storage account Dedicated: single blob disk per + storage account Managed: azure managed data disk (only + in managed availability set). defaults to shared' + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + azureFile: + description: AzureFile represents an Azure File Service mount + on the host and bind mount to the pod. + type: object + required: + - secretName + - shareName + properties: + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: the name of secret that contains Azure Storage + Account Name and Key + type: string + shareName: + description: Share Name + type: string + cephfs: + description: CephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + type: object + required: + - monitors + properties: + monitors: + description: 'Required: Monitors is a collection of Ceph + monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: array + items: + type: string + path: + description: 'Optional: Used as the mounted root, rather + than the full Ceph tree, default is /' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: boolean + secretFile: + description: 'Optional: SecretFile is the path to key + ring for User, default is /etc/ceph/user.secret More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + secretRef: + description: 'Optional: SecretRef is reference to the + authentication secret for User, default is empty. More + info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + user: + description: 'Optional: User is the rados user name, default + is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it' + type: string + cinder: + description: 'Cinder represents a cinder volume attached and + mounted on kubelets host machine. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: object + required: + - volumeID + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Examples: + "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" + if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: boolean + secretRef: + description: 'Optional: points to a secret object containing + parameters used to connect to OpenStack.' + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + volumeID: + description: 'volume id used to identify the volume in + cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md' + type: string + configMap: + description: ConfigMap represents a configMap that should + populate this volume + type: object + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and + the result can be other mode bits set.' + type: integer + format: int32 + items: + description: If unspecified, each key-value pair in the + Data field of the referenced ConfigMap will be projected + into the volume as a file whose name is the key and + content is the value. If specified, the listed keys + will be projected into the specified paths, and unlisted + keys will not be present. If a key is specified which + is not present in the ConfigMap, the volume setup will + error unless it is marked optional. Paths must be relative + and may not contain the '..' path or start with '..'. + type: array + items: + description: Maps a string key to a path within a volume. + type: object + required: + - key + - path + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this + file, must be a value between 0 and 0777. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + type: integer + format: int32 + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or its keys + must be defined + type: boolean + csi: + description: CSI (Container Storage Interface) represents + storage that is handled by an external CSI driver (Alpha + feature). + type: object + required: + - driver + properties: + driver: + description: Driver is the name of the CSI driver that + handles this volume. Consult with your admin for the + correct name as registered in the cluster. 
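+                  # Illustrative example (hand-written, not generated): a
+                  # hypothetical config.volumes entry paired with a matching
+                  # volumeMounts entry (see above), projecting a ConfigMap into
+                  # the operator's pods; all names are assumptions:
+                  #   volumes:
+                  #   - name: extra-config
+                  #     configMap:
+                  #       name: example-operator-config
+                  #   volumeMounts:
+                  #   - name: extra-config
+                  #     mountPath: /etc/example-operator
+                  #     readOnly: true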
+ type: string + fsType: + description: Filesystem type to mount. Ex. "ext4", "xfs", + "ntfs". If not provided, the empty value is passed to + the associated CSI driver which will determine the default + filesystem to apply. + type: string + nodePublishSecretRef: + description: NodePublishSecretRef is a reference to the + secret object containing sensitive information to pass + to the CSI driver to complete the CSI NodePublishVolume + and NodeUnpublishVolume calls. This field is optional, + and may be empty if no secret is required. If the secret + object contains more than one secret, all secret references + are passed. + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + readOnly: + description: Specifies a read-only configuration for the + volume. Defaults to false (read/write). + type: boolean + volumeAttributes: + description: VolumeAttributes stores driver-specific properties + that are passed to the CSI driver. Consult your driver's + documentation for supported values. + type: object + additionalProperties: + type: string + downwardAPI: + description: DownwardAPI represents downward API about the + pod that should populate this volume + type: object + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and + the result can be other mode bits set.' + type: integer + format: int32 + items: + description: Items is a list of downward API volume file + type: array + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + type: object + required: + - path + properties: + fieldRef: + description: 'Required: Selects a field of the pod: + only annotations, labels, name and namespace are + supported.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + mode: + description: 'Optional: mode bits to use on this + file, must be a value between 0 and 0777. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + type: integer + format: int32 + path: + description: 'Required: Path is the relative path + name of the file to be created. Must not be absolute + or contain the ''..'' path. Must be utf-8 encoded. + The first item of the relative path must not start + with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the container: + only resources limits and requests (limits.cpu, + limits.memory, requests.cpu and requests.memory) + are currently supported.' 
+ type: object + required: + - resource + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + emptyDir: + description: 'EmptyDir represents a temporary directory that + shares a pod''s lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: object + properties: + medium: + description: 'What type of storage medium should back + this directory. The default is "" which means to use + the node''s default medium. Must be an empty string + (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir' + type: string + sizeLimit: + description: 'Total amount of local storage required for + this EmptyDir volume. The size limit is also applicable + for memory medium. The maximum usage on memory medium + EmptyDir would be the minimum value between the SizeLimit + specified here and the sum of memory limits of all containers + in a pod. The default is nil which means that the limit + is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir' + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + fc: + description: FC represents a Fibre Channel resource that is + attached to a kubelet's host machine and then exposed to + the pod. + type: object + properties: + fsType: + description: 'Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + lun: + description: 'Optional: FC target lun number' + type: integer + format: int32 + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' + type: boolean + targetWWNs: + description: 'Optional: FC target worldwide names (WWNs)' + type: array + items: + type: string + wwids: + description: 'Optional: FC volume world wide identifiers + (wwids) Either wwids or combination of targetWWNs and + lun must be set, but not both simultaneously.' + type: array + items: + type: string + flexVolume: + description: FlexVolume represents a generic volume resource + that is provisioned/attached using an exec based plugin. + type: object + required: + - driver + properties: + driver: + description: Driver is the name of the driver to use for + this volume. + type: string + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". The default filesystem depends on FlexVolume + script. + type: string + options: + description: 'Optional: Extra command options if any.' + type: object + additionalProperties: + type: string + readOnly: + description: 'Optional: Defaults to false (read/write). + ReadOnly here will force the ReadOnly setting in VolumeMounts.' 
+ type: boolean + secretRef: + description: 'Optional: SecretRef is reference to the + secret object containing sensitive information to pass + to the plugin scripts. This may be empty if no secret + object is specified. If the secret object contains more + than one secret, all secrets are passed to the plugin + scripts.' + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + flocker: + description: Flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + type: object + properties: + datasetName: + description: Name of the dataset stored as metadata -> + name on the dataset for Flocker should be considered + as deprecated + type: string + datasetUUID: + description: UUID of the dataset. This is unique identifier + of a Flocker dataset + type: string + gcePersistentDisk: + description: 'GCEPersistentDisk represents a GCE Disk resource + that is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: object + required: + - pdName + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + partition: + description: 'The partition in the volume that you want + to mount. If omitted, the default is to mount by volume + name. Examples: For volume /dev/sda1, you specify the + partition as "1". Similarly, the volume partition for + /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: integer + format: int32 + pdName: + description: 'Unique name of the PD resource in GCE. Used + to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk' + type: boolean + gitRepo: + description: 'GitRepo represents a git repository at a particular + revision. DEPRECATED: GitRepo is deprecated. To provision + a container with a git repo, mount an EmptyDir into an InitContainer + that clones the repo using git, then mount the EmptyDir + into the Pod''s container.' + type: object + required: + - repository + properties: + directory: + description: Target directory name. Must not contain or + start with '..'. If '.' is supplied, the volume directory + will be the git repository. Otherwise, if specified, + the volume will contain the git repository in the subdirectory + with the given name. + type: string + repository: + description: Repository URL + type: string + revision: + description: Commit hash for the specified revision. + type: string + glusterfs: + description: 'Glusterfs represents a Glusterfs mount on the + host that shares a pod''s lifetime. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md' + type: object + required: + - endpoints + - path + properties: + endpoints: + description: 'EndpointsName is the endpoint name that + details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + path: + description: 'Path is the Glusterfs volume path. More + info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: string + readOnly: + description: 'ReadOnly here will force the Glusterfs volume + to be mounted with read-only permissions. Defaults to + false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod' + type: boolean + hostPath: + description: 'HostPath represents a pre-existing file or directory + on the host machine that is directly exposed to the container. + This is generally used for system agents or other privileged + things that are allowed to see the host machine. Most containers + will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + --- TODO(jonesdl) We need to restrict who can use host directory + mounts and who can/can not mount host directories as read/write.' + type: object + required: + - path + properties: + path: + description: 'Path of the directory on the host. If the + path is a symlink, it will follow the link to the real + path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + type: + description: 'Type for HostPath Volume Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath' + type: string + iscsi: + description: 'ISCSI represents an ISCSI Disk resource that + is attached to a kubelet''s host machine and then exposed + to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md' + type: object + required: + - iqn + - lun + - targetPortal + properties: + chapAuthDiscovery: + description: whether to support iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: whether to support iSCSI Session CHAP authentication + type: boolean + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + initiatorName: + description: Custom iSCSI Initiator Name. If initiatorName + is specified with iscsiInterface simultaneously, new + iSCSI interface <target portal>:<volume name> will be + created for the connection. + type: string + iqn: + description: Target iSCSI Qualified Name. + type: string + iscsiInterface: + description: iSCSI Interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: iSCSI Target Lun number. + type: integer + format: int32 + portals: + description: iSCSI Target Portal List. The portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + type: array + items: + type: string + readOnly: + description: ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. + type: boolean + secretRef: + description: CHAP Secret for iSCSI target and initiator + authentication + type: object + properties: + name: + description: 'Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + targetPortal: + description: iSCSI Target Portal. The Portal is either + an IP or ip_addr:port if the port is other than default + (typically TCP ports 860 and 3260). + type: string + name: + description: 'Volume''s name. Must be a DNS_LABEL and unique + within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + nfs: + description: 'NFS represents an NFS mount on the host that + shares a pod''s lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: object + required: + - path + - server + properties: + path: + description: 'Path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + readOnly: + description: 'ReadOnly here will force the NFS export + to be mounted with read-only permissions. Defaults to + false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: boolean + server: + description: 'Server is the hostname or IP address of + the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs' + type: string + persistentVolumeClaim: + description: 'PersistentVolumeClaimVolumeSource represents + a reference to a PersistentVolumeClaim in the same namespace. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: object + required: + - claimName + properties: + claimName: + description: 'ClaimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims' + type: string + readOnly: + description: Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + photonPersistentDisk: + description: PhotonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host machine + type: object + required: + - pdID + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: ID that identifies Photon Controller persistent + disk + type: string + portworxVolume: + description: PortworxVolume represents a portworx volume attached + and mounted on kubelets host machine + type: object + required: + - volumeID + properties: + fsType: + description: FSType represents the filesystem type to + mount Must be a filesystem type supported by the host + operating system. Ex. "ext4", "xfs". Implicitly inferred + to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: VolumeID uniquely identifies a Portworx volume + type: string + projected: + description: Items for all in one resources secrets, configmaps, + and downward API + type: object + required: + - sources + properties: + defaultMode: + description: Mode bits to use on created files by default. + Must be a value between 0 and 0777. Directories within + the path are not affected by this setting. 
This might + be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode + bits set. + type: integer + format: int32 + sources: + description: list of volume projections + type: array + items: + description: Projection that may be projected along + with other supported volume types + type: object + properties: + configMap: + description: information about the configMap data + to project + type: object + properties: + items: + description: If unspecified, each key-value + pair in the Data field of the referenced ConfigMap + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified + which is not present in the ConfigMap, the + volume setup will error unless it is marked + optional. Paths must be relative and may not + contain the '..' path or start with '..'. + type: array + items: + description: Maps a string key to a path within + a volume. + type: object + required: + - key + - path + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options that + affect the file mode, like fsGroup, + and the result can be other mode bits + set.' + type: integer + format: int32 + path: + description: The relative path of the + file to map the key to. May not be an + absolute path. May not contain the path + element '..'. May not start with the + string '..'. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the ConfigMap or + its keys must be defined + type: boolean + downwardAPI: + description: information about the downwardAPI data + to project + type: object + properties: + items: + description: Items is a list of DownwardAPIVolume + file + type: array + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + type: object + required: + - path + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name and namespace are supported.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options that + affect the file mode, like fsGroup, + and the result can be other mode bits + set.' + type: integer + format: int32 + path: + description: 'Required: Path is the relative + path name of the file to be created. + Must not be absolute or contain the + ''..'' path. Must be utf-8 encoded. 
+ The first item of the relative path + must not start with ''..''' + type: string + resourceFieldRef: + description: 'Selects a resource of the + container: only resources limits and + requests (limits.cpu, limits.memory, + requests.cpu and requests.memory) are + currently supported.' + type: object + required: + - resource + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + secret: + description: information about the secret data to + project + type: object + properties: + items: + description: If unspecified, each key-value + pair in the Data field of the referenced Secret + will be projected into the volume as a file + whose name is the key and content is the value. + If specified, the listed keys will be projected + into the specified paths, and unlisted keys + will not be present. If a key is specified + which is not present in the Secret, the volume + setup will error unless it is marked optional. + Paths must be relative and may not contain + the '..' path or start with '..'. + type: array + items: + description: Maps a string key to a path within + a volume. + type: object + required: + - key + - path + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use + on this file, must be a value between + 0 and 0777. If not specified, the volume + defaultMode will be used. This might + be in conflict with other options that + affect the file mode, like fsGroup, + and the result can be other mode bits + set.' + type: integer + format: int32 + path: + description: The relative path of the + file to map the key to. May not be an + absolute path. May not contain the path + element '..'. May not start with the + string '..'. + type: string + name: + description: 'Name of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, + kind, uid?' + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + serviceAccountToken: + description: information about the serviceAccountToken + data to project + type: object + required: + - path + properties: + audience: + description: Audience is the intended audience + of the token. A recipient of a token must + identify itself with an identifier specified + in the audience of the token, and otherwise + should reject the token. The audience defaults + to the identifier of the apiserver. + type: string + expirationSeconds: + description: ExpirationSeconds is the requested + duration of validity of the service account + token. As the token approaches expiration, + the kubelet volume plugin will proactively + rotate the service account token. The kubelet + will start trying to rotate the token if the + token is older than 80 percent of its time + to live or if the token is older than 24 hours.Defaults + to 1 hour and must be at least 10 minutes. + type: integer + format: int64 + path: + description: Path is the path relative to the + mount point of the file to project the token + into. 
+ type: string + quobyte: + description: Quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + type: object + required: + - registry + - volume + properties: + group: + description: Group to map volume access to Default is + no group + type: string + readOnly: + description: ReadOnly here will force the Quobyte volume + to be mounted with read-only permissions. Defaults to + false. + type: boolean + registry: + description: Registry represents a single or multiple + Quobyte Registry services specified as a string as host:port + pair (multiple entries are separated with commas) which + acts as the central registry for volumes + type: string + tenant: + description: Tenant owning the given Quobyte volume in + the Backend Used with dynamically provisioned Quobyte + volumes, value is set by the plugin + type: string + user: + description: User to map volume access to Defaults to + serivceaccount user + type: string + volume: + description: Volume is a string that references an already + created Quobyte volume by name. + type: string + rbd: + description: 'RBD represents a Rados Block Device mount on + the host that shares a pod''s lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md' + type: object + required: + - image + - monitors + properties: + fsType: + description: 'Filesystem type of the volume that you want + to mount. Tip: Ensure that the filesystem type is supported + by the host operating system. Examples: "ext4", "xfs", + "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + TODO: how do we prevent errors in the filesystem from + compromising the machine' + type: string + image: + description: 'The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + keyring: + description: 'Keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + monitors: + description: 'A collection of Ceph monitors. More info: + https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: array + items: + type: string + pool: + description: 'The rados pool name. Default is rbd. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + readOnly: + description: 'ReadOnly here will force the ReadOnly setting + in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: boolean + secretRef: + description: 'SecretRef is name of the authentication + secret for RBDUser. If provided overrides keyring. Default + is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + user: + description: 'The rados user name. Default is admin. More + info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it' + type: string + scaleIO: + description: ScaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + type: object + required: + - gateway + - secretRef + - system + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Default is "xfs". 
+ type: string + gateway: + description: The host address of the ScaleIO API Gateway. + type: string + protectionDomain: + description: The name of the ScaleIO Protection Domain + for the configured storage. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef references to the secret for ScaleIO + user and other sensitive information. If this is not + provided, Login operation will fail. + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + sslEnabled: + description: Flag to enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: Indicates whether the storage for a volume + should be ThickProvisioned or ThinProvisioned. Default + is ThinProvisioned. + type: string + storagePool: + description: The ScaleIO Storage Pool associated with + the protection domain. + type: string + system: + description: The name of the storage system as configured + in ScaleIO. + type: string + volumeName: + description: The name of a volume already created in the + ScaleIO system that is associated with this volume source. + type: string + secret: + description: 'Secret represents a secret that should populate + this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: object + properties: + defaultMode: + description: 'Optional: mode bits to use on created files + by default. Must be a value between 0 and 0777. Defaults + to 0644. Directories within the path are not affected + by this setting. This might be in conflict with other + options that affect the file mode, like fsGroup, and + the result can be other mode bits set.' + type: integer + format: int32 + items: + description: If unspecified, each key-value pair in the + Data field of the referenced Secret will be projected + into the volume as a file whose name is the key and + content is the value. If specified, the listed keys + will be projected into the specified paths, and unlisted + keys will not be present. If a key is specified which + is not present in the Secret, the volume setup will + error unless it is marked optional. Paths must be relative + and may not contain the '..' path or start with '..'. + type: array + items: + description: Maps a string key to a path within a volume. + type: object + required: + - key + - path + properties: + key: + description: The key to project. + type: string + mode: + description: 'Optional: mode bits to use on this + file, must be a value between 0 and 0777. If not + specified, the volume defaultMode will be used. + This might be in conflict with other options that + affect the file mode, like fsGroup, and the result + can be other mode bits set.' + type: integer + format: int32 + path: + description: The relative path of the file to map + the key to. May not be an absolute path. May not + contain the path element '..'. May not start with + the string '..'. + type: string + optional: + description: Specify whether the Secret or its keys must + be defined + type: boolean + secretName: + description: 'Name of the secret in the pod''s namespace + to use. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#secret' + type: string + storageos: + description: StorageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + type: object + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: Defaults to false (read/write). ReadOnly + here will force the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: SecretRef specifies the secret to use for + obtaining the StorageOS API credentials. If not specified, + default values will be attempted. + type: object + properties: + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + volumeName: + description: VolumeName is the human-readable name of + the StorageOS volume. Volume names are only unique + within a namespace. + type: string + volumeNamespace: + description: VolumeNamespace specifies the scope of the + volume within StorageOS. If no namespace is specified + then the Pod's namespace will be used. This allows + the Kubernetes name scoping to be mirrored within StorageOS + for tighter integration. Set VolumeName to any name + to override the default behaviour. Set to "default" + if you are not using namespaces within StorageOS. Namespaces + that do not pre-exist within StorageOS will be created. + type: string + vsphereVolume: + description: VsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + type: object + required: + - volumePath + properties: + fsType: + description: Filesystem type to mount. Must be a filesystem + type supported by the host operating system. Ex. "ext4", + "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: Storage Policy Based Management (SPBM) profile + ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: Storage Policy Based Management (SPBM) profile + name. + type: string + volumePath: + description: Path that identifies vSphere volume vmdk + type: string + installPlanApproval: + description: Approval is the user approval policy for an InstallPlan. + type: string + name: + type: string + source: + type: string + sourceNamespace: + type: string + startingCSV: + type: string + status: + type: object + required: + - lastUpdated + properties: + catalogHealth: + description: CatalogHealth contains the Subscription's view of its relevant + CatalogSources' status. It is used to determine SubscriptionStatusConditions + related to CatalogSources. + type: array + items: + description: SubscriptionCatalogHealth describes the health of a CatalogSource + the Subscription knows about. + type: object + required: + - catalogSourceRef + - healthy + - lastUpdated + properties: + catalogSourceRef: + description: CatalogSourceRef is a reference to a CatalogSource. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a valid + JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part + of an object. TODO: this design is not final and this field + is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + healthy: + description: Healthy is true if the CatalogSource is healthy; + false otherwise. + type: boolean + lastUpdated: + description: LastUpdated represents the last time that the CatalogSourceHealth + changed + type: string + format: date-time + conditions: + description: Conditions is a list of the latest available observations + about a Subscription's current state. + type: array + items: + description: SubscriptionCondition represents the latest available + observations of a Subscription's state. + type: object + required: + - status + - type + properties: + lastHeartbeatTime: + description: LastHeartbeatTime is the last time we got an update + on a given condition + type: string + format: date-time + lastTransitionTime: + description: LastTransitionTime is the last time the condition + transit from one status to another + type: string + format: date-time + message: + description: Message is a human-readable message indicating details + about last transition. + type: string + reason: + description: Reason is a one-word CamelCase reason for the condition's + last transition. + type: string + status: + description: Status is the status of the condition, one of True, + False, Unknown. + type: string + type: + description: Type is the type of Subscription condition. + type: string + currentCSV: + description: CurrentCSV is the CSV the Subscription is progressing to. + type: string + installPlanGeneration: + description: InstallPlanGeneration is the current generation of the + installplan + type: integer + installPlanRef: + description: InstallPlanRef is a reference to the latest InstallPlan + that contains the Subscription's current CSV. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + installedCSV: + description: InstalledCSV is the CSV currently installed by the Subscription. + type: string + installplan: + description: 'Install is a reference to the latest InstallPlan generated + for the Subscription. DEPRECATED: InstallPlanRef' + type: object + required: + - apiVersion + - kind + - name + - uuid + properties: + apiVersion: + type: string + kind: + type: string + name: + type: string + uuid: + description: UID is a type that holds unique ID values, including + UUIDs. Because we don't ONLY use UUIDs, this is an alias to string. Being + a type captures intent and helps make sure that UIDs and names + do not get conflated. + type: string + lastUpdated: + description: LastUpdated represents the last time that the Subscription + status was updated. + type: string + format: date-time + reason: + description: Reason is the reason the Subscription was transitioned + to its current state. + type: string + state: + description: State represents the current state of the Subscription + type: string diff --git a/crds/zz_defs.go b/crds/zz_defs.go new file mode 100644 index 000000000..e761549fe --- /dev/null +++ b/crds/zz_defs.go @@ -0,0 +1,360 @@ +// Code generated by go-bindata. (@generated) DO NOT EDIT. 
+
+// Package crds generated by go-bindata.
+// sources:
+// operators.coreos.com_catalogsources.yaml
+// operators.coreos.com_clusterserviceversions.yaml
+// operators.coreos.com_installplans.yaml
+// operators.coreos.com_operatorgroups.yaml
+// operators.coreos.com_operators.yaml
+// operators.coreos.com_subscriptions.yaml
+package crds
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
+// bindataRead decompresses a gzipped embedded asset and returns its raw bytes.
+func bindataRead(data []byte, name string) ([]byte, error) {
+	gz, err := gzip.NewReader(bytes.NewBuffer(data))
+	if err != nil {
+		return nil, fmt.Errorf("read %q: %v", name, err)
+	}
+
+	var buf bytes.Buffer
+	_, err = io.Copy(&buf, gz)
+	clErr := gz.Close()
+
+	if err != nil {
+		return nil, fmt.Errorf("read %q: %v", name, err)
+	}
+	if clErr != nil {
+		// Surface the close error; err is nil at this point.
+		return nil, clErr
+	}
+
+	return buf.Bytes(), nil
+}
+
+// asset pairs an embedded file's bytes with its file info.
+type asset struct {
+	bytes []byte
+	info  os.FileInfo
+}
+
+// bindataFileInfo implements os.FileInfo for embedded assets.
+type bindataFileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+}
+
+// Name returns the file name.
+func (fi bindataFileInfo) Name() string {
+	return fi.name
+}
+
+// Size returns the file size.
+func (fi bindataFileInfo) Size() int64 {
+	return fi.size
+}
+
+// Mode returns the file mode.
+func (fi bindataFileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+
+// ModTime returns the file modification time.
+func (fi bindataFileInfo) ModTime() time.Time {
+	return fi.modTime
+}
+
+// IsDir reports whether the file is a directory.
+func (fi bindataFileInfo) IsDir() bool {
+	return fi.mode&os.ModeDir != 0
+}
+
+// Sys returns nil; there is no underlying data source.
+func (fi bindataFileInfo) Sys() interface{} {
+	return nil
+}
+
+// Gzipped contents of operators.coreos.com_catalogsources.yaml (binary payload elided).
+var _operatorsCoreosCom_catalogsourcesYaml = []byte("\x1f\x8b\x08\x00…")
+
+func operatorsCoreosCom_catalogsourcesYamlBytes() ([]byte, error) {
+	return bindataRead(
+		_operatorsCoreosCom_catalogsourcesYaml,
+		"operators.coreos.com_catalogsources.yaml",
+	)
+}
+
+func operatorsCoreosCom_catalogsourcesYaml() (*asset, error) {
+	bytes, err := operatorsCoreosCom_catalogsourcesYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "operators.coreos.com_catalogsources.yaml", size: 7030, mode: os.FileMode(420), modTime: time.Unix(1586375941, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
+
+// Gzipped contents of operators.coreos.com_clusterserviceversions.yaml (binary payload elided).
+var _operatorsCoreosCom_clusterserviceversionsYaml = []byte("\x1f\x8b\x08\x00…")
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\xfd\x7b\x73\x1c\x37\x92\x2f\x0c\xff\x3f\x9f\x02\xa1\xf1\x06\xc9\x35\xbb\x29\xcd\xed\x9d\xd1\xd9\xf7\x71\x70\x25\xda\xc3\xc7\x12\xc5\x10\x69\xfb\x4c\x78\xbc\x3e\xe8\xaa\xec\x6e\x1c\x56\x03\x35\x00\xaa\xc9\x9e\xf5\x7e\xf7\x27\x90\x00\xea\xd2\x17\xb2\x0b\x28\x9a\x94\x06\xb9\x11\x3b\x26\xc5\xce\xc6\x25\x91\x48\x64\xfe\x32\x93\x96\xec\x7b\x90\x8a\x09\xfe\x9a\xd0\x92\xc1\x9d\x06\x6e\x7e\x52\xe3\x9b\x3f\xab\x31\x13\x27\xcb\x57\x13\xd0\xf4\xd5\x6f\x6e\x18\xcf\x5f\x93\x37\x95\xd2\x62\xf1\x11\x94\xa8\x64\x06\x6f\x61\xca\x38\xd3\x4c\xf0\xdf\x2c\x40\xd3\x9c\x6a\xfa\xfa\x37\x84\x70\xba\x80\xd7\x24\x2b\x2a\xa5\x41\x2a\x90\x4b\x96\xc1\xd2\x7e\x89\x1a\x8b\x12\x24\xd5\x42\xaa\x71\x26\x24\x08\xf3\x3f\x8b\xdf\x10\x42\x39\x17\x9a\x1a\x56\xca\xb0\x20\x24\x67\xaa\x2c\xe8\xea\x02\x79\x7d\x70\x1f\x22\x6e\xac\xf6\x2f\x40\x65\x92\x95\x1a\xc7\xfe\x11\x4a\x09\x0a\xb8\x56\x84\xf2\xe6\xef\xf5\x9c\x6a\xa2\xe6\xa2\x2a\x72\x32\x01\x22\x2b\xce\x19\x9f\x11\xc1\x89\x9e\x83\x1f\xe2\x31\x61\x3c\x2b\xaa\x9c\xf1\x19\x32\x26\x44\xc2\x3f\x2a\x26\x61\xe1\xf8\xe5\x84\x71\xa5\x69\x51\x10\xa5\x25\xd5\x30\x5b\x8d\x7f\xa3\x4a\xc8\xfc\x64\xdd\x90\xcb\xa2\x92\xb4\xd8\x35\x73\xfc\x13\xc5\xf8\xac\x2a\xa8\xdc\xf1\x47\xf8\x37\x6e\xa5\xed\xbf\x5f\xd9\x7f\x6f\xcf\xbb\x60\x4a\x7f\xbb\xfb\x6f\xde\x31\xa5\xed\x77\xcd\x85\xd4\x17\xcd\xf0\x46\x24\x53\xcb\xe6\xbf\xec\x80\x32\x33\x1d\x21\x59\xf3\x47\xa2\xc0\xfd\xc8\x73\xdc\x58\x5a\x5c\x4a\xc6\x35\xc8\x37\xa2\xa8\x16\x76\x73\x46\x6e\x87\xdf\xda\x2d\xc2\xcf\xe9\x55\x09\xaf\xcd\xf2\xf8\x45\xec\xec\xce\xf5\x1c\xf0\x33\x44\x4c\x71\xe1\xdf\x5c\x7d\x8f\x7f\xf4\xff\x5e\x7d\xb8\xb8\xa4\x7a\xfe\x9a\x8c\xcd\x7a\x8e\x5b\x9b\xde\xfa\x9e\xf6\xe4\x1f\xfe\x1e\xb7\x94\x0f\x7d\x55\xb3\xe2\xfe\x6b\x3e\x42\x59\xd0\x0c\x54\xcf\xf9\x50\xf3\x15\x56\xd0\xf4\x9c\x29\x22\x38\x10\xd9\x66\xb5\xfe\xcd\xad\x7f\xf4\x5f\x7d\x39\xa7\x0a\xb6\x7f\x6f\xfb\xd3\x9a\xea\x4a\x8d\x4b\xf7\xc7\x33\x29\xaa\xf2\x35\xd9\x71\xa0\x96\xfe\x54\x2f\x5f\xd1\xa2\x9c\xd3\x57\xcd\xef\x3a\x9b\xd8\xfa\x67\x42\x8c\x34\x42\xfe\x9a\x68\x59\xd9\xe1\x28\x2d\x24\x9d\x41\xfd\x1b\x95\x09\x33\x3e\x94\xaa\x92\x66\x90\xff\x86\x10\x3c\x78\x72\x09\xdf\xf1\x1b\x2e\x6e\xf9\xd7\x0c\x8a\x5c\xbd\x26\x53\x5a\xe0\x30\x55\x35\x91\x4e\x5f\x38\x19\xb3\xf3\x78\x4d\xfe\xfb\x7f\xcc\x98\x68\xc1\x72\x3c\xf8\xf6\x1f\x45\x09\xfc\xf4\xf2\xfc\xfb\xdf\x5f\x65\x73\x58\xd0\xd7\xee\x44\x76\x96\x7f\xab\xdc\x13\xa6\xcc\x5e\xa0\x82\x22\x5e\x43\xa1\x14\xac\x4a\x20\xff\x67\xeb\x67\xae\x4a\xc8\xfe\xcf\xd8\x7d\x85\x5d\x7a\x31\xf9\xbf\x90\xe9\xae\x1e\xc8\xfd\x28\x46\xc4\xeb\xb9\xfa\x17\x66\x4b\xdd\x0f\xa5\x34\x7b\xa1\xeb\xb3\x64\xa8\xa5\x5f\xeb\xdf\xad\xcd\xe6\xc0\x4c\xd7\xcd\x21\x37\x1a\x15\x14\x4a\xae\xdb\x2d\xc8\x89\xc2\xa5\xb0\x12\xcd\x94\x91\x2e\xab\xeb\x70\xd9\x5a\x6c\x09\xca\x23\x77\x73\x18\x13\x33\x59\x90\xca\x2b\xc0\x4c\xf0\x25\x48\x4d\x24\x64\x62\xc6\xd9\x3f\x6b\xce\x8a\x68\x81\x5f\x59\x50\x0d\x4a\x77\x38\xe2\xe1\xe7\xb4\x30\x1b\x55\xc1\x31\x6a\xc3\x05\x5d\x11\x09\xe6\x3b\x48\xc5\x5b\xdc\xf0\x4f\xd4\x98\xbc\x17\x12\x08\xe3\x53\xf1\x9a\xcc\xb5\x2e\xd5\xeb\x93\x93\x19\xd3\xfe\x46\xc9\xc4\x62\x51\x71\xa6\x57\x27\x99\xe0\x5a\xb2\x49\x65\xa4\xf7\x24\x87\x25\x14\x27\x8a\xcd\x46\x54\x66\x73\xa6\x21\xd3\x95\x84\x13\x5a\xb2\x11\x0e\x9c\xe3\xe5\x30\x5e\xe4\xbf\xad\xc5\xe9\xa0\x35\xd2\x8d\x83\x53\xab\xd2\x9d\xeb\x6e\x94\xa8\x95\x1a\xfb\x31\x3b\xfe\x66\x79\xcd\xaf\xcc\xaa\x7c\x3c\xbb\xba\x26\xfe\x4b\x71\x0b\xba\x6b\x8e\xab\xdd\x7c\x4c\x35\x0b\x6f\x16\x8a\xf1\x29\x48\xbb\x71\x53\x29\x16\xc8\x11\x78\x5e\x0a\xc6\xb5\xbb\x85\x18\xf0\xee\xa2\xab\x6a\xb2\x6
0\x5a\xa1\xfc\x81\xd2\x66\x7f\xc6\xe4\x0d\xde\x91\xe6\x1a\xab\xca\x9c\x6a\xc8\xc7\xe4\x9c\x93\x37\x74\x01\xc5\x1b\xaa\xe0\xd1\x97\xdd\xac\xb0\x1a\x99\x25\x7d\x78\xe1\xdb\xe6\x40\xf7\x0f\x3b\xe7\x8b\x10\x7f\x8d\x6e\xdd\xa1\x9d\x67\x96\xe4\x90\x15\x54\x5a\x7b\x81\x68\x28\x0a\xf2\xe1\xdd\x7b\x32\x17\xb7\x46\x92\xdd\x65\xdd\x59\x51\x73\x2a\x3a\x46\x41\x46\x39\x59\x50\x4e\x67\x40\x68\x59\x2a\x32\x15\x92\x50\x32\x63\x4b\xe0\xfe\xe4\x8d\x1f\x1a\xfc\xa6\x82\x20\xa8\x13\xba\xd7\x58\xf3\xfb\xcd\x81\x6d\x53\x19\x76\xb8\x6b\xe6\xd0\x8e\x15\x3a\x6d\xfe\x0e\x25\x99\x93\x8a\x2b\x2d\x2b\xdc\xc4\x9c\xdc\xc0\xca\x09\xf5\x82\x96\xa8\xcc\x21\x27\xb7\x4c\xcf\xd7\x78\x12\x42\xdb\x02\x4e\x35\x4a\xee\x04\x88\x02\x4d\x26\x2b\x62\xcc\x42\x54\x02\x5a\x88\x02\xb5\x05\xf2\x42\x65\x20\x41\x4b\x06\x4b\xd8\x64\x29\x27\x4c\x4b\x2a\x57\xb5\x34\x8c\xd7\xfe\x66\xc7\xa2\xe2\x87\x5b\xf6\xc7\xf6\x25\x22\xbb\x64\x8f\x58\xb5\xeb\x2c\xab\xbc\xb6\x51\x1f\x58\xc9\xcb\x73\x27\x67\x8d\x55\xab\x9c\x9c\x81\x22\xc6\xf8\x73\x16\x45\x6d\x23\xe3\xb7\x6c\x8c\xc9\x0a\x55\x4e\x84\xac\xa5\xc3\x2c\x61\x5b\x00\x27\x60\x54\x8b\xa4\xdc\xfc\xc3\x56\x21\xef\xb1\x52\xbb\x44\xc8\x90\xb8\xe5\x5d\xd9\xec\xf2\xa3\x52\x3a\xf3\xad\x4b\x4c\xc3\x62\x0b\xb7\x7b\xd7\xac\xfe\xb5\x19\xd0\x92\xe5\x60\x16\x4f\x53\x66\xc5\xc5\x9c\x4e\x3a\x11\xd5\xfa\xe0\xdb\xfb\xe5\x3f\x98\x93\x25\xa3\x84\xce\x66\x12\x66\x9b\x37\xdc\x1e\x2b\x62\x69\xdb\xd1\x6c\x68\x64\xcd\xa7\x1d\xff\x66\x94\xdc\x8e\x7f\xe2\xdd\x43\xdd\xfe\xa7\xb6\x11\xbf\x4e\xf7\xed\x92\x25\x9a\x99\xb9\xfa\x85\x14\x72\xe7\x1f\x3e\xb4\x7b\x96\xee\xd9\x43\x4b\xdd\x9d\x5c\xfb\x72\xf7\xaf\x13\x23\xf9\x8d\xb2\x5d\x82\x1b\xe5\x3d\x6c\x49\xa3\x5e\x27\x40\x4a\x90\x53\x21\x17\xe6\x38\x70\x42\x49\x66\xad\xb3\x5a\xd5\xa0\x42\xe4\xd9\xf6\x05\x6d\xcf\xf5\x9e\x7d\xb6\x74\xff\x6e\x5b\x1a\x91\x92\x6e\x51\x7d\x0d\x3d\xbc\x49\x96\xda\x4b\x77\xef\x1f\xde\xa3\xa1\x36\x78\xb6\xde\xbb\x43\xf1\x34\xd3\x1d\x8c\x19\xde\x23\x0f\x71\xeb\xbe\xc9\xe9\xed\x7b\x50\xca\x5c\xb1\x68\x65\x49\x7a\x4b\x80\x67\xc2\x1c\x71\xf3\xac\xb1\x2c\xd7\x55\xdd\x26\x9d\x6b\xc2\x16\x65\xe1\x9e\xe3\xef\xa9\x54\x73\x5a\x80\xc4\xdb\xe7\x3b\xbe\xe8\xfc\x6c\xe5\xee\x41\x96\x95\x82\xdc\xe8\xa5\x1c\x0a\xba\xb2\x83\xc9\x21\x13\x39\x3a\x07\xa4\x79\xd2\x64\x62\x51\x56\x1a\x08\xb5\xff\x8a\xe3\x66\x7c\xf6\xd0\x68\xf7\x5e\x4e\x62\x2c\x8e\x05\xd5\xaf\xc9\x64\xa5\xef\x1f\xf0\xdd\x28\xdf\x47\x2b\xb4\x07\x70\xbf\x6e\xb0\xf4\xa0\x86\x68\x33\xbc\x77\x46\xc6\xa0\xa4\x8c\x83\xbc\x14\x52\x3f\xa4\xb6\xcc\x83\x62\x06\x72\xe7\x5f\xf9\x65\x61\x5c\xff\xfe\x77\x3b\xfe\x2a\x87\xb2\x10\x2b\x23\x0f\xf7\x9f\x97\x3d\xc6\xbe\xd7\x59\xde\x87\xcf\x3e\xe7\x77\x0f\x3e\xf6\x55\x1f\xc3\x61\xfd\xd1\xd3\x9b\x01\x8f\x9d\xc3\xda\x7b\x7f\x37\x97\x41\xaf\xb0\xcb\x73\xff\xea\xff\x08\x53\x90\xc0\x33\xa7\x77\xbe\xad\x26\x20\x39\x68\x50\xf5\xc8\xee\xbf\xc0\x56\x25\x58\x0d\x61\xec\xb6\xf5\x4b\xeb\xd7\xba\xab\x76\xda\x20\xfe\x0f\x76\x5a\x22\xfe\x0f\xee\xb3\x47\x2c\xed\x7b\xe1\xdd\x2f\x52\x96\xf6\xd6\x7c\xf7\x8b\x57\x4f\x66\xcb\x4d\xe7\x4a\x20\x3f\xf3\x12\x7d\x22\xeb\xeb\xaa\xf3\xd5\x1d\xdb\x6b\xca\xa0\xc8\x09\x33\xc6\x53\xcb\xd3\xb4\x9d\x26\x85\xc8\x6e\x9c\x4b\xf2\xe3\x5b\xa2\x84\x35\xc5\x8c\xed\x6d\xae\xc5\x4c\x70\x55\x2d\x80\xb0\xfb\x64\x33\x99\x5b\xe1\x3c\x93\xb9\xb5\x8b\x92\xb9\x35\xa4\xb9\x65\xfd\xe7\x4f\xa5\xab\xd6\xbe\x7c\xa7\xb6\xc2\xbf\x4b\xfa\xea\xbe\x3f\x4c\xfa\x2a\xe9\xab\x9d\xf4\xf9\xe8\xab\x07\x6d\xb4\x7b\x79\xdc\x77\x74\x93\x1b\x33\xb9\x31\x93\x1b\x73\x9d\xd2\x3d\x75\x0f\xa5\x7b\x2a\xdd\x53\xc9\x8d\x99\xdc\x98\xc9\x8d\x79\xef\x07\x93\x1b\x33\xb9\x31\x93\x1b\x93\x24\x73\x2b\x92\x59\x32\xb7\x92\xb9\x95\x
dc\x98\x49\x5f\x79\x9e\x49\x5f\xed\xa2\xa4\xaf\x9e\xb9\xbe\x0a\x77\x63\x5a\xd3\xdb\x5b\xde\xfb\x22\x52\x5f\xec\x4a\xb6\xda\x0e\x4b\x7d\xf3\xf1\xad\xf2\xe0\xd3\x8d\x21\xc6\x81\x51\xc9\xdf\x37\xad\xef\xf3\xfa\x5b\x09\xfa\x3c\x11\xfa\x6e\x74\xa6\xf9\xed\x87\x5b\x0e\x39\x26\x2a\x1d\x13\xa6\xcd\x1f\x18\xd1\x66\x19\xd3\xc5\xaa\x1e\xc8\xf8\xc5\x6f\xb6\xad\xdf\x73\x03\xb9\xbe\xf9\xf8\x76\x6f\xb7\xb0\x99\xfc\x0e\xf1\x30\xdb\xf3\x38\x1e\xe0\xe4\xe5\x4d\x5e\xde\x7b\x28\x5d\xe3\xbb\x28\x5d\xe3\xff\x8a\xd7\xf8\x73\xf3\x94\x26\x3f\x67\xf2\x73\x26\x3f\x67\x6f\x7e\xc9\xcf\x49\x92\xc1\x11\xc5\x33\x19\x1c\xbb\x28\x19\x1c\xc9\xcf\xb9\x49\x49\x5f\x25\x7d\x95\xf4\xd5\x2e\xfa\x7c\xf4\xd5\x73\x87\x6b\x26\x87\xdc\x16\x4a\x0e\xb9\x7b\xe6\x9a\xee\x9b\x10\x9e\xe9\xbe\xd9\x45\xe9\xbe\x49\x0e\xb9\xe4\x90\xdb\x49\xc9\x21\xb7\x4e\xc9\x21\xb7\xc1\x2b\x39\xe4\x92\xc1\xb1\x46\xc9\xe0\xd8\x45\xc9\xe0\x48\x0e\xb9\x4d\x4a\xfa\x2a\xe9\xab\xa4\xaf\x76\xd1\xe7\xa3\xaf\xc2\x1d\x72\xf7\x9c\xa4\xdd\x9f\xd9\x7d\x52\x76\x7e\x86\x65\xbb\xbe\x60\xdb\x4a\xed\x58\x99\x7b\x55\xd1\x6e\x05\x34\x22\x13\xaa\xe0\x4f\x7f\x68\x55\x65\x6e\xff\xe3\x02\x72\x46\x0d\xeb\x8d\x7f\xbb\x5f\x1d\x35\x4c\xb7\xaf\xfb\x03\xfb\x56\x7f\x6d\xcf\x4f\xbb\x12\xb1\xf7\x02\x43\xcd\xd6\xe4\xe7\xf6\x0f\xaf\x5c\x41\xfa\x56\xf9\x61\x44\x60\x36\x77\x03\xbf\xa7\x94\x7c\x9b\x6a\xb7\xda\xed\x1c\x24\x20\x93\xf5\xaa\xf7\x46\xcf\x18\x2b\x9a\x4d\x19\xe4\x3d\x4a\x94\xee\xda\xbc\x51\xcd\x79\xed\x1f\xee\xdb\x9a\xf5\x8a\xc1\x5b\x97\xc8\x2f\xcb\x5b\xeb\x13\x7e\x5b\x27\x5a\xae\xaf\x53\x49\xa5\xd1\x55\xce\x77\xbc\x75\xaf\xf0\x82\x6d\x71\x58\x5b\xf9\x6d\x2a\xeb\x81\x5b\xf5\xbe\xdb\x74\xd4\xca\x0a\xdd\x36\x9e\x87\x2e\x51\xd7\x57\xe0\x12\xe4\x82\x29\xb5\x0d\x64\xdc\x1d\xe4\x7d\xaa\xec\x01\x15\xb6\x63\xc5\xfd\xe8\x5b\x43\xa8\x8d\x1c\x5c\x73\x39\xa1\xf7\x3d\xc4\x64\x55\x80\xed\xba\xe0\x4a\xf8\x12\x9a\x65\xa2\xe2\x9a\x70\x80\xdc\x7a\x15\xb6\x49\xe7\x03\xea\xf1\x01\x1b\xe7\x61\x0b\x67\x64\x47\x76\xcf\xbf\xbb\xf1\x9e\xda\xe1\x5e\xdc\xe7\x5f\xd8\xcf\x16\xc2\x2f\xbc\xff\x0e\xd9\xf7\x42\xda\xeb\x3a\xea\xec\xe8\xa5\x28\x58\xb6\xfa\x58\x15\x40\xe6\xa2\xc8\x15\x96\x16\x37\x77\x2a\xc3\xf6\x19\x54\x37\xa6\xeb\x83\xb7\x1c\x25\x25\x72\xc3\x19\x1d\x93\x49\xa5\x49\x2e\x40\x11\x2e\xb4\x4f\xbb\xee\xb0\xbf\xaf\xc8\x46\x43\xb7\x73\x5b\xb6\xde\x30\x25\xb4\x2c\x0b\x06\x18\xfe\x11\x92\xdc\xce\x59\x36\xb7\xcd\x39\x4a\x9a\x41\xfd\x67\x0f\x8f\xb4\x66\xb3\x9f\x2d\xf2\xa0\xe9\x4c\xf6\x34\x9f\x89\xf7\x39\x4d\x1e\x5a\xce\x7d\xed\x68\x62\x2b\x92\x7c\x23\x45\x55\xee\xf1\xa7\x9b\xfe\x40\xfb\x49\xa3\xf5\xf5\x5a\x03\x0f\xff\x8f\x7b\x30\xf5\x21\x1d\xbb\xcb\x96\x55\xed\xe0\x1c\x63\x22\xc1\xa2\x2a\x34\x2b\x0b\x64\xbb\x17\xc7\x99\x1d\x18\x95\xd0\xdc\x47\xc7\x84\xf2\x95\x8b\x30\xf9\xc2\xf9\x90\x13\x3a\x33\xdf\xfa\xb0\x2c\x19\x12\xbc\x9e\x20\xf0\x6a\x01\x46\xaf\xe4\xcd\x60\xf1\x61\xc6\x57\x66\x94\xf7\x94\x5d\x59\xa7\x5b\x56\x14\x64\x02\x84\x16\x85\xb8\xdd\xbc\x36\xb7\xd1\xfe\x46\x26\xd9\xdf\xd0\x24\xfd\xcc\x67\x42\xb8\xe0\xde\x21\xfc\xdd\xc7\x77\xfd\x05\xe8\xa2\xfb\x79\xd7\x69\x01\xb4\x59\xe2\x92\x4a\xcd\x68\xb1\xd7\xa0\x2b\x59\x28\x2b\x43\xd4\x3c\x2c\xa4\x6f\x67\x31\xa7\x18\x51\xcc\x40\xd9\xfe\x08\xe4\xdf\x51\x22\xf6\xe2\xe9\x36\xc3\x6a\x22\xc1\x8b\x15\xa1\x56\x32\xa7\x55\x51\x1c\x93\x29\xe3\xd4\x5c\x2c\x50\xba\x8c\x96\xbd\x98\x9a\xf7\x20\xb9\x62\x3c\x03\xb3\x76\xa3\xda\xa0\xc2\xd9\x1b\x59\x35\xda\xae\x56\x49\xf9\xf1\x9e\x87\x87\x29\xef\x17\x50\x6e\xa8\x46\x45\x65\x74\x52\x00\x76\x2d\x70\xe6\xdd\x47\x71\xdf\xe5\xd4\x26\xe9\xdd\xfb\xb9\xed\x48\x41\xdb\x2c\xfe\x93\x71\x7c\x88\x91\x8f\x78\x0d\x67\xf4\xfe\x70\xad\x27\x60\x7a\x6e\xde\
x88\x65\x59\xac\x8c\x0a\x36\x67\xa4\x39\x38\x87\xaa\xca\xe6\x66\x89\x5f\x94\x22\x57\xeb\x79\x3f\xdb\x49\x48\xf2\x42\x41\x26\x41\xab\x17\x47\xe6\xa7\xf5\x35\xc5\xf5\x76\xbc\xf7\xdb\x74\x45\x5e\x9c\xd0\x92\xbd\x38\x3a\x26\xb8\xf1\xd8\x4e\x43\xe8\xf9\xa7\x73\x26\xfd\xfc\x5b\x6d\x9d\xee\xa7\xb5\x26\x59\xad\x4f\xbb\x7e\x11\xa2\xb4\x6d\x16\xcc\x8d\xf9\xc0\x43\xd9\x53\xc1\x14\x1e\x62\x14\x65\xdf\xf9\x68\xe3\x16\x1e\x13\x72\xca\x09\x2c\x4a\xbd\xcf\x82\x11\xd4\x0c\x0b\xa0\xdc\x71\x84\x25\xc8\x95\x9e\x33\x3e\xc3\x81\x7e\x6a\xca\x73\x8f\x10\x5e\x43\x5b\x37\xc9\x29\x4c\xbf\xd8\xcd\x61\xda\x68\x40\xb3\x73\x0c\x1b\x1b\xe2\x79\x9f\x16\x45\xfb\x21\x44\xf1\x47\x7f\x25\x7f\x32\x6b\x8c\xb6\x52\xef\xf5\xfd\xde\x7c\xaa\xbb\xb6\xf6\x57\xf6\x8a\x31\x0a\x6c\xbf\xa1\x0a\x72\xfa\xee\x9d\x6d\x10\xe4\xd6\xee\x5b\xc6\x73\xfb\x72\x39\xd5\xb6\xc3\x0e\x7c\x04\x33\x19\x34\x4a\xf6\xdb\x34\x5f\x84\x28\xb7\x37\x0f\x53\xb8\x8d\x63\x82\x83\xdc\xdc\xb7\xbd\x78\x62\xcf\x9e\x4f\x61\x5f\x37\x1f\x50\xfb\x3c\x7e\xee\x61\xdb\x7a\x4b\x3f\xd5\x33\x18\xfb\x15\x75\xac\x5f\x34\xa4\xe9\xbd\xd1\x69\x63\xd5\xe2\xe5\x6e\xfe\xbe\x99\x04\x39\x7d\xf7\xbe\xee\xe6\x25\x81\xde\xa3\xb0\x07\x7b\xf4\xde\x1b\x26\x1f\xdd\x1f\x55\xdd\xef\xa5\xf2\x70\x40\x7b\x5f\xf1\xd9\xe1\x1a\x6a\xa8\xb3\x57\x6b\x7b\xe4\x9e\x39\xee\x41\x91\xd9\x87\xa8\x7d\x0e\x3c\xfc\x6c\xb6\x99\xd5\x30\xa7\x4b\x26\xa4\x7f\x44\x34\x5f\x70\xff\xe1\xdb\xfb\x25\xb9\xdf\x3b\x72\x44\x14\x14\x90\x69\xb1\xbb\x3a\x99\xfd\x33\x0d\x8b\xb2\xb8\x4f\x88\x48\xaf\xd7\xe6\x82\xf1\x8f\x40\xf3\xd5\x15\x64\x82\xe7\x7b\x68\x86\xce\x6e\xbc\x67\x9c\x2d\xaa\x05\xe1\xd5\x62\x02\xb8\x84\xca\xf2\xc1\x73\x60\xdf\xf3\x9b\x0e\xde\x4d\xe2\x70\x5b\xac\xdc\xe9\xc8\x49\x29\xf2\x76\xff\x4f\x33\x3c\x6c\x80\x25\x2a\x8d\x6f\x39\x31\xdd\x4b\xd1\xa9\xa6\x38\x1c\xc9\x24\x55\xc6\x2e\x39\xc6\x81\x31\x6d\x6e\x82\x09\x60\xb8\x8d\xe5\x20\xb7\x64\xdf\x6f\x12\x5d\x52\x56\x18\x0b\x7e\x4c\xde\xc2\x94\x56\x05\xf6\x78\x23\x2f\xc9\xa1\x19\xb0\x7f\x32\x86\xb1\x34\x76\xae\x12\x82\x9b\xff\xb5\xc9\xf7\x38\xed\xa3\x3d\x43\x11\x0f\x15\xb6\xf3\xb4\x4f\x81\x3b\x4f\x25\xad\xd4\xc3\xde\x8f\x35\x81\x38\xe7\xb9\x39\x85\x6d\x13\xb3\xa5\x09\x99\x72\x5c\x1f\xbe\xd7\xec\xbc\x26\x42\x14\xf0\xe0\x6b\xa6\x94\x62\x26\x41\xa9\xb7\x40\xf3\x82\x71\x08\x93\xe5\xeb\x39\x90\x05\xbd\x43\x79\xd6\x6c\x01\xe6\x2e\x6f\x4b\x33\x6d\xcd\x64\x9f\x6b\x59\x90\x05\xbd\x81\x7a\x70\x64\x02\x53\xec\xf5\x87\xcb\xd0\x48\x89\x93\xc4\x29\x65\xc5\x7e\xb6\xf2\x75\x77\x4d\xb1\x31\xa0\x28\x0a\x90\x56\x04\xcd\xcf\x8c\x57\x60\xf8\x96\x52\x98\x97\xf6\x1e\x4c\xed\xd7\xb7\xaf\x5e\x34\x89\xa8\x61\x67\x9b\xba\xe1\x09\x24\x94\x5c\xae\x2d\xf6\xd9\x5d\x86\x1e\xde\x3d\xbe\x44\x02\x55\xc8\xc8\x9e\x14\x55\xc9\xa9\x79\x53\xfb\x02\x14\xad\x49\xb9\xae\xa9\x7b\xf0\xbc\x10\xda\x75\xbf\xab\x17\x1a\xf9\xbb\xae\x8b\xa0\x34\x5b\xa0\x52\xc9\xab\xbd\xec\x53\x82\x43\xc1\xfd\xa7\xdb\x45\xb7\x73\xf8\xff\xf4\xf2\xe5\x1e\xc3\x7c\xbc\x43\x2a\x01\xdd\x0a\x7d\x65\xfd\xa2\xd6\xd7\xfe\x1a\x34\x6f\xfc\x31\xb9\x36\xb6\x2b\x9a\xd9\xd8\xea\x72\x8f\xc1\xa2\xa0\xe7\x4c\x69\xc6\x67\x15\x53\x73\x32\x01\x7d\x0b\xc0\x09\xdc\xd9\xe2\x21\xe4\x9f\x20\x05\x0a\x93\xd9\x92\x9d\x91\xa8\x6d\xd4\x5e\xe8\x57\x4f\xbb\xca\x4b\xa6\x98\xe0\x7f\x65\x4a\x0b\xb9\x7a\xc7\x16\xec\x9e\x5a\xa3\x9e\x36\xbb\x0f\xd7\xab\x2e\x8a\x1c\x5b\x17\xb3\x8c\x5e\x81\x9d\xa0\x04\x73\x55\xed\xb7\xde\xf8\xb2\x26\xe6\xd4\x4f\x68\x76\xb3\x65\xdb\xd6\x36\x65\x0f\xae\xfb\x6e\x5b\xbd\x29\xfb\x8d\xf4\xd5\xcb\xa7\xdc\x37\x6f\x52\xf5\xdc\xab\x77\x74\x02\x45\xfd\x61\x54\xfe\xf6\x78\x9c\xdd\xd9\x35\x6d\xef\xdd\x1e\xeb\x70\x3b\x17\x0a\x90\x85\xf5\x7e\x23\x63\x1f\x13\x63\xaa\x56
\x88\x46\xf3\x08\xbe\x97\x43\x90\x4e\xa7\x5d\x1e\x8d\xaa\x1a\x93\x73\x4d\x16\x95\xd2\x64\x41\x75\x36\xb7\x11\x53\xb1\x8f\x7e\xf6\xa6\xe5\x81\x22\x85\x59\x82\xbd\x35\xdb\x5e\xf1\x94\x7e\x31\x10\x62\x47\x7f\x76\x67\x1e\xce\xf7\x86\x44\xbb\xd4\xd9\xc8\x75\x16\x5d\x0f\x02\xce\x71\x2f\xa6\x8d\x24\x75\xba\xcc\x8f\xf1\x54\x77\xfb\xce\xef\xe9\xcb\x26\xe4\xf4\xe2\xed\x7e\x5a\xb0\xef\x0b\xbf\xd7\x1b\x7f\x3d\x7e\x64\x17\x65\xeb\x74\xc9\x9e\x1e\x2c\x82\xa1\xc3\x9a\x43\x37\x90\x64\xbb\x4d\x1f\x13\x4a\x6e\x60\x75\xfc\xc0\x83\x7a\x8d\xe7\x7a\x37\x60\x09\x85\xb3\x34\x01\x9b\xe6\x1a\x65\xe5\xba\x59\xef\xc9\xb4\x97\xf4\x5a\xda\x37\x2a\xe8\x69\x64\x86\xb6\xf7\xdf\xfa\x09\xee\xf9\x81\x7e\x07\xca\xd2\x0d\xac\xf6\xff\xe3\x35\xf1\x30\xab\xec\x5e\xdc\x56\x4e\xcc\x2f\xbc\xa1\xdf\x83\x69\xeb\x3c\xed\x1f\xba\x6d\x53\x2f\x97\xa3\x27\xbf\xb8\xc1\xd3\xaf\xc5\xaf\xed\xce\x33\x6b\x70\xb0\xff\xc9\x20\x28\x43\x85\x6d\xff\x3c\x67\x25\x5e\xe7\x3e\xc2\xe6\x7b\xb1\x7f\x4f\x0b\xb6\xff\xc1\x68\xcf\xcd\xde\x30\xe7\xfc\xd8\x18\xc6\xe6\x7f\xf0\xca\xb2\xc6\xfc\x5b\x01\xea\x42\x68\xfc\xcd\xa3\x2f\xb6\x9d\x4a\xf0\x52\xdb\x8f\xbb\x70\x07\x6a\x3e\x74\x2e\xf4\x1b\x43\x33\x8e\xb1\x2f\x7d\x57\x6f\x21\x53\xe4\x9c\x13\x21\xdd\x3a\xf5\xe2\x89\x8d\xfe\xed\x00\xed\xd0\xf0\xb6\x9d\xd8\x08\x22\x06\x4f\xcc\xf7\xf5\x66\xd9\x1e\x9b\xdb\x36\x21\x3b\xbb\x36\xd0\x30\xdd\x10\xd1\x6a\xdc\xff\x56\xf1\x84\xbe\x89\xb2\xc0\x97\x9b\x7d\x54\xd9\x66\xfc\x54\xc3\x8c\x65\x64\x01\x72\x06\xa4\x34\x37\x6f\x7f\x21\xeb\x3b\x9a\x5e\x37\x5d\xfb\x8b\xf6\x96\x24\x34\x21\xd0\x20\x0c\x36\x40\xec\xa7\xad\xed\xb1\xa0\xa5\x11\xe4\xff\x36\xb7\x1f\xee\xcd\xff\xec\xab\xea\x29\x93\x6a\x4c\x4e\x89\x62\x7c\x56\x40\x9b\x83\x7f\x3f\xb7\xbe\x6c\x4f\xa6\x66\x34\x4c\x11\x73\xa5\x2d\x69\x61\xee\x78\xa3\x8d\x38\x01\x0b\x2c\x36\x23\x5d\x37\xa1\xf6\x15\x41\x6b\xf3\x9a\xfb\xa1\x8e\x83\xbf\xb8\x81\xd5\x8b\xe3\x0d\x51\x7f\x71\xce\x5f\xec\xcb\xd5\xa8\xb1\x0d\xb1\xae\x8d\x0b\x0c\xb4\xbf\xc0\x7f\x7b\xb1\xaf\xec\x6d\xb3\xe0\xfa\xdb\x66\x3d\x4c\x88\xfd\x9a\xe4\xdf\xf7\x65\x7b\xc9\xae\x07\xd8\x05\x3c\x53\x3b\x7e\x18\x07\x22\xd5\x82\x54\x0a\xec\x3b\x15\xcf\xfe\x1e\x23\x06\xff\x5a\xc2\x97\x0f\xba\x8f\x38\xdc\xe2\x0b\xe7\x49\x1f\x16\xe6\xdd\xcc\xf8\xec\xbb\x32\xa7\xfa\x41\xf4\xbe\xa5\xce\x1a\x1d\x7c\xb4\x0c\x48\x85\x1c\x8c\xfc\x4d\xd9\x8c\x94\x54\xd2\x85\x1a\x93\x4b\x6b\x1a\xec\xb9\xa5\x28\xb3\x6c\xda\x8e\x6b\xb8\x35\xbf\x5e\x95\x40\xfe\xff\xe4\x63\x7b\xb4\xfb\x4a\xf5\x68\x34\x22\xd7\x1f\xde\x7e\x78\x4d\xec\xe7\xec\x1b\x51\x0b\x32\x15\xe8\x3a\x10\x95\x34\xc3\x5e\x02\x7f\x30\xb7\xbf\x21\xf3\x0e\x16\x1c\x3e\x4c\x8f\xc9\xed\x9c\x6a\x58\x82\x24\xb7\x46\x5c\x32\x96\x43\xed\x53\x1f\x1f\x3c\xce\xb9\xe9\x6f\xeb\x2e\xe8\xdd\x55\x25\x67\x7b\x6e\x31\xd9\xd8\xe6\xb6\x43\xb8\x71\xdd\xa0\x30\x1b\x9b\xb7\xc7\xad\xe3\x4a\x23\xa8\x6c\x0e\x79\x55\x40\x4e\xe8\x44\x2c\xc1\x39\x3d\xad\xff\xcd\x7e\x41\x0f\x9e\x6e\x28\x68\x2c\x56\xe0\xbf\xc2\x58\x4b\x13\x25\x8a\x4a\x43\x7f\x96\x87\x70\xf7\x9a\xfc\x11\x71\x3b\x94\x94\x20\x33\xe0\x9a\xce\x60\xdd\x4d\xd8\x97\xe3\xab\x97\xff\x76\xe4\xac\x0d\x33\x4a\xe7\x99\x7d\x69\xa4\xfe\x3d\xbd\xfb\x8e\xd7\x41\x90\x3e\x17\xbf\x22\x2f\xc7\xe4\xb4\x3b\x55\x74\xac\xd3\x22\xab\x0a\xf4\xf8\x4e\xa5\x58\xf4\x60\xd9\x9a\xf0\x64\x45\xa4\xa8\x10\x53\x45\xaa\xb2\xeb\xf1\xfd\xdd\x1f\xff\xad\x8f\x5d\x73\x76\x47\x17\x65\x01\xaf\xc9\xed\x1c\x1c\x36\x80\x29\xb4\xf7\xb5\x20\xbf\x7f\xf9\x6f\xf6\x3e\xe4\x70\xdb\x83\x67\xe3\x7d\x6a\x24\x8b\x1a\xb1\xaa\x4a\xc2\x16\x36\x8b\x01\xf6\x44\x44\x58\x72\x83\x03\xaf\x1b\xbd\x6a\x53\x9a\x4a\xad\x8e\x09\x42\xc1\x7a\x0a\x3d\xfa\xd1\x85\xa6\xc5\x9a\xdf\x13\x7d\x8a\x70\x6b\x0f\x5
2\x2e\x7a\x70\x34\x92\x03\x18\x6e\x20\xaf\x7e\xff\xf2\xdf\x36\xdd\xd7\x1f\x78\x06\xe6\x3b\xfa\xec\xba\x19\x05\xa2\x12\x27\x00\x9c\xdc\xb0\xa2\x80\xfc\x18\x07\xd8\x5d\xe6\x1e\x3c\x3b\x1b\x32\xad\xa4\x9e\x83\x3c\x26\xc0\x95\xb5\x96\xed\x83\xd9\xac\x4c\x9f\xb9\x77\x15\x90\xac\x38\x47\xcb\xdb\x46\x44\x31\x5e\xb1\x77\x84\xc3\x93\xd9\x21\xb7\xd1\xc6\x38\xd5\x64\x21\x94\xde\xbe\xb4\xfb\x29\x75\x82\x26\xda\xea\xc3\x74\x7f\x95\x3b\xea\xe9\xfe\x5d\xff\x5c\xcf\x27\xe1\xdd\xe8\xa6\x2e\x8c\x30\x62\x5c\x8f\x84\x1c\x59\x16\xaf\x89\x96\xd5\xbe\x2a\x68\xd1\xd1\x5b\xcf\xe6\x82\xa9\x9a\x31\xf9\xa7\x59\xb3\xc7\xee\xa6\xe8\xcf\x75\xf3\x4e\xd9\x75\x53\xf4\xbb\xbe\xda\x02\xd6\xbe\x29\x4e\x83\x2f\xb0\x4d\xfd\xbf\x43\xab\xf7\x71\x4b\x88\x5b\xbe\xfb\xf6\x42\x03\xc3\xde\x46\x3d\x78\xae\xdf\x27\xbb\x6f\x89\x3e\xa7\xb9\x75\x9f\x74\xe3\x4b\xdd\x5b\xa2\xe7\xdc\x0d\xdf\xff\xdf\xa6\x46\x78\x94\x8b\xc6\x69\xf0\x7e\xb7\x61\x1d\x57\x41\xc4\xc4\xf1\xa0\x73\xaf\x95\xb7\xb5\x9b\x6d\xbc\xc5\xb0\xb2\x43\xef\xe9\xf6\xec\x5e\x2a\x1b\x37\x82\xbf\x2f\xc3\x6f\x85\x16\x9c\x44\x23\x40\xd4\xdc\x0b\x6a\x80\x8b\xa1\x00\xaa\xf4\x36\x31\x48\x17\x43\x43\xbb\x33\x47\xd7\xa9\xfb\xe4\x36\xef\x3c\x5c\xd6\x26\x82\xf7\xc6\x0a\xed\x8b\x8f\x60\x31\x51\xfb\xa1\xef\x2d\xfe\xbe\xf3\x60\x7c\x51\x1b\xaf\x66\x27\x03\xde\x92\x3d\x16\xd3\x87\x0f\xfb\xba\x1c\xdc\xc7\x5a\x45\x15\x5c\xc8\xd2\x81\x88\x6e\xd9\x5e\x90\xd9\x09\x78\x00\xd9\xd3\x86\x2d\x41\xd3\xdd\x19\xc8\xeb\xd4\x35\x08\xae\x34\xe5\x39\x95\xb9\x1b\xd9\xc1\x81\xaa\xd9\x8d\xc9\x7b\xb1\x77\x6c\x91\xf1\xa9\x78\x4d\xe6\x5a\x97\xea\xf5\xc9\xc9\x8c\xe9\xf1\xcd\x9f\xd5\x98\x89\x93\x4c\x2c\x16\x15\x67\x7a\x75\x82\xb0\x21\x36\xa9\xb4\x90\xea\x24\x87\x25\x14\x27\x8a\xcd\x46\x54\x66\x73\xa6\x21\xd3\x95\x84\x13\x5a\xb2\x51\xe3\x25\x50\xe3\x45\xfe\x5b\x3f\x9c\x47\x7a\xe4\x77\x4e\x21\xc6\x58\xe4\x12\x46\x15\xbf\xe1\xe2\x96\x8f\xd0\x93\xa8\xf6\x3e\x8f\x0f\x23\x4b\x3d\xad\xed\xc2\x16\x30\xe9\x06\x5c\x74\xdf\x03\x39\xf5\xc2\x6c\x37\xf0\x11\xb7\xc6\x4c\x77\x44\x79\x3e\xb2\xe0\xa9\x47\xda\xa1\x7e\xf1\xcf\x51\x03\xc1\xdc\xcf\x6d\xd0\xdf\xcb\x43\x33\xcd\x96\xd0\x1b\xfa\xe7\xa9\xb3\xf5\x1f\x7c\x5e\x4d\x5e\x49\xbb\xfb\x0d\x06\xb0\xe7\xf5\x59\x8a\x9c\x2c\xe8\x0a\xed\x67\x1c\x21\x11\xd6\xf0\xe1\x22\x07\x17\x01\x5c\xf6\x31\x1e\xb4\x20\x57\xc6\x48\xba\x36\x4f\x3d\x87\x25\x44\xe8\xf3\x4a\x69\x58\x58\x0c\x89\xfd\xa6\x5e\x06\x99\x96\x2b\x0b\x55\x94\x37\x84\x69\x8f\x03\x34\x6f\xf4\x1b\xe4\xa8\x94\xc8\x8c\x95\xd7\xc7\x74\x6a\xf6\xdc\x7b\xa7\x7c\xe4\x89\x92\x52\x28\x86\xcb\xe1\xee\xf5\xfd\xcd\xe6\x30\x73\xa0\x05\x17\xfa\xd3\x1f\xf6\x15\xa9\x29\x76\xed\xea\x11\x27\xef\x62\x60\xa7\xed\x04\x57\x27\x0c\x07\xca\xbb\xff\xfa\x99\x62\x99\xe0\x4a\x4b\xca\xb6\x17\x16\xd8\x4e\x01\xb0\x86\x30\x2c\x01\x41\x69\x3e\xed\xbd\x5c\x64\x13\xd5\xef\xaf\x7e\x3c\x1e\x7e\x03\xc2\x96\x8c\xd4\xf5\x08\x7c\x32\x86\x51\xbf\x01\x41\xc2\x9e\x2b\x48\x82\x57\xd1\x7e\x12\xa6\x20\x25\xe4\x6f\xd1\x5a\xbf\xaa\x67\x7e\x3e\xe3\xa2\xfe\xf5\xd9\x1d\x64\xd5\x3e\x45\x96\x36\x69\x23\xce\xe3\xdd\xd1\x0e\x38\x6c\x07\xd0\x9b\x2d\x6a\x26\xcf\xca\x59\x6d\x02\xb7\xd1\x99\x6f\x8a\x6a\xa6\xa6\xfd\x62\xcc\x8e\xf1\xbc\x25\x0a\xd0\x82\x71\xd5\xc7\x2b\x80\xa9\x07\xcc\xa1\x31\x61\x73\x89\x99\x46\x55\x9d\xcd\x85\x50\xfd\x60\x2c\x96\xa8\x95\x5a\x9c\xed\x92\x09\x8b\x4a\xc2\xbc\x74\x49\x16\xfb\x5b\x6d\x6d\xf2\x19\xed\xcd\xa4\x2d\xd8\xac\xfe\xa2\x00\x9e\x4c\x59\x07\x5f\x2d\x69\x1e\xcb\x63\x86\x7a\xcb\xee\x2d\x03\xb6\x8b\xcc\xc7\x67\x68\x75\x2b\x4d\x54\xb5\x30\x03\xbf\x05\x36\x9b\x6b\x75\x4c\xd8\x78\xef\x80\x55\x9b\xcc\xa9\x05\x9a\xcd\x5b\xab\xba\x00\xd0\xbe\x71\x62\xe0\x28\x1b\x55\xd2\x8d\xf7\x
1e\xee\x55\x1e\x75\x3b\xb9\x8a\x05\xc7\xb5\x55\xb4\x7e\x76\x43\x84\x69\x8b\xc0\x1f\x13\xd0\xd9\xf8\xe8\x98\xb8\x0a\x5c\x41\x32\x6a\x76\x67\xb2\x22\x4c\x83\xb1\x6d\xd0\xeb\x20\x45\x35\xb3\x00\x51\x17\xf1\xef\x87\x63\xb2\x84\xa2\x5a\xe7\xbb\x63\xd6\x40\x8e\x91\x93\x17\x56\x10\xf6\x7d\xbb\xb6\x49\xdb\x1a\x21\x66\xc8\x6c\xda\x98\x4d\x08\x46\xb0\xef\xc3\x00\xa6\x99\x90\x12\x54\x29\x6c\x5c\x67\x1d\xd8\xf0\xbf\x02\xe5\xca\x0c\xec\x50\x1d\xd9\xf8\xb6\xf9\xc5\x9c\xcd\xe6\xee\x34\x84\xec\x93\x33\xeb\xba\x27\xb5\xff\x31\x0a\xc3\xd3\x58\x0a\x40\xd5\x58\xea\x62\x49\x5d\x22\x79\x33\x8b\x00\x8e\xa4\x7d\x6e\x35\xc8\x45\x2d\x03\xa8\x0c\xf0\x82\x0e\x11\x5a\x62\xf7\xcb\x77\x38\x75\x3a\x8b\xbc\x24\x87\x46\x69\x11\xa6\x7b\x42\xfa\x3c\x99\xab\x60\x24\xca\xa3\x31\x39\x25\xbc\xaa\x6f\x55\xa3\x6c\x83\x6d\x19\x4b\x9d\xc9\x73\xe1\xe7\x6e\x07\x1c\xc4\x11\x53\xf4\x95\x68\xc6\x1c\x72\x02\x42\x0d\x25\x4b\x7d\xd1\xb4\x6d\x1a\xb9\xb5\x85\xfb\xbb\x09\xec\xfe\xb8\xdd\xf3\x80\x8f\x86\x1b\x79\xfe\xf3\x7e\xdc\x61\x9f\xdf\x00\x6d\xa3\x72\xdc\x2b\x7f\xf4\x3e\x32\x12\x76\xdc\x7a\xf1\xd5\x2a\x2d\x98\x63\x57\xe3\xda\xf5\x0e\x93\xd5\x68\x49\x23\x03\xec\x1b\x09\xce\x4a\xd8\x4e\xeb\xd0\x7b\x5f\xa2\x43\xe4\xe1\x4b\x4e\x76\x25\x2b\x44\x71\x9c\xac\x70\x54\x3d\x12\x43\x76\x53\xcc\xcd\xd4\x50\xf0\x1d\xd5\xd0\xa3\x1c\x22\x4b\xdd\xb4\x89\x56\x3a\x44\x24\xdf\xad\xc9\x14\x91\x3c\x9b\x54\x8c\x76\x8a\xc5\x10\x03\x5d\x4b\xd0\x88\x1d\x67\xef\xf4\x8e\x1d\x43\x8b\x57\x24\x96\x62\x2e\xaf\x86\xfa\x24\x88\xec\xe2\x30\xc8\xa6\x0d\xa1\x1d\x2d\xf5\x4c\x31\xd9\x4e\x1b\xee\x8a\x3e\xe9\x5a\x0f\x8c\xae\x09\xb8\x0e\x74\x2e\x49\xaf\x22\x84\x0f\x53\x50\x08\x73\x3b\x85\x64\xbd\x6c\xa7\xb5\x8a\x47\x3e\x05\x66\x88\xa5\xb3\x49\x34\x9d\xc4\x98\x01\xd8\x6e\x49\xad\x19\x80\x2b\x26\xe7\x34\x09\x37\x43\x4c\x7f\x5b\xca\x4e\xac\x5a\x37\xd4\xc9\xf8\x21\xdf\xf4\xcc\x1f\xd9\x31\x58\x9e\x93\x77\xc1\x56\x5c\x43\x03\x8a\x78\xff\x5c\xa3\xed\xb4\xfe\x7c\x8d\xb5\x51\x3c\xd5\x99\x4b\x03\x4a\xe1\x5a\x3e\xd3\x00\x1c\x63\x32\xa2\xb6\xd3\x7a\xa6\xc6\x00\x2c\x37\x33\xad\x9e\xeb\x5a\x0e\x91\xc1\xb5\x9d\x1e\x71\x59\xc3\x92\xd7\x76\x0f\xb3\x95\xe7\x33\x00\xc7\x6f\xb4\x59\xce\x77\xfa\xb8\xb5\x04\x83\xa8\xdf\x3a\x39\x6e\x4e\x7b\xc5\x64\x77\x72\xf4\x59\x5a\xce\xad\x3a\xc4\xae\xdb\x62\x4a\xbe\x54\x00\x56\x79\x28\x25\xf4\x0b\xcc\xee\x1c\x2f\xa6\x59\xfa\x88\x2c\x82\x1d\x07\x5b\xd7\x56\xba\xe0\x00\x3c\x37\x13\x0e\x87\x90\xfe\xb0\x94\xc5\xed\x34\xcc\xf3\xd6\xd2\x00\x8f\xdc\xf6\xa0\x22\x2f\x5b\x74\x7d\x7c\x6d\x31\x38\xff\x62\x5e\x0f\x8b\x3c\x4a\x5e\x8f\xfb\x29\x79\x3d\x92\xd7\x63\x4f\x4a\x5e\x8f\xed\x94\xbc\x1e\xc9\xeb\x11\x49\xc9\xeb\x91\xbc\x1e\x7d\x28\x79\x3d\x92\xd7\x23\x79\x3d\x92\xd7\x23\x9e\x63\xf2\x7a\x24\xaf\xc7\x67\xe5\xf5\xb0\x88\x95\x41\x40\x3a\x3f\x58\x80\x59\x50\x3e\xc5\xda\xa0\x98\x9e\x5b\x5f\x8c\xcf\xef\xee\xc0\x6c\x82\xf9\x72\x91\xc3\x95\x7b\x18\x5c\x23\x14\xc8\xd5\x95\x92\x94\xf7\x4a\xed\xee\xd2\xab\xd1\xab\x97\xbd\x92\xa3\xdb\x14\x96\xf3\xd1\xa5\x3e\x05\x63\xd7\x69\x17\xae\xf7\x51\x30\xf9\xee\x42\xae\x01\xc0\x91\x7e\xac\x1a\x24\xdf\x85\xbc\xfb\xa6\x56\x01\x1c\x17\xa0\x09\xd5\x1d\x44\x26\x5b\xc0\x71\x8f\x0a\xb3\xeb\xd4\x2e\xd5\xdd\x54\xc7\x11\xdc\xe1\x7f\x8d\x48\x86\x88\xce\x7d\x2b\x39\x5c\xf2\x00\xc9\x80\xda\x9a\x58\x13\x70\x6b\x13\xb2\x4d\x62\x01\xb6\x6c\xb3\x57\xf3\xa5\xc8\x09\x78\x99\x0a\xe0\x78\x08\xe3\xd9\x98\xe4\xb6\x04\x3c\xe5\x2e\x93\xfb\xe8\xb8\x95\x2e\x16\xb2\xf9\x74\x85\xf9\x0c\x74\x85\x1b\xe6\xf2\xc6\x60\x09\x5c\x57\xb4\xe8\x95\x73\xe6\x09\x96\x2c\xd3\x75\xb6\x1c\x96\x6b\x60\x5a\x05\xee\x7a\x94\x83\x2b\xdc\xa9\x35\xda\xd0\x9b\xfd\xcf\x6a\x9c\x0f\x6a\xe3\xfb\xc3\xee\xab\
x35\x3f\x87\x5d\x8f\xb1\x73\xd0\x07\xaa\x5e\xe7\xd6\x6f\x15\x03\x36\xe3\xc3\x4c\x96\xd0\x0b\xc1\x30\x40\x15\xf6\xe1\x63\x08\x2c\x9e\x0c\x60\xab\x44\xd9\x27\xeb\xee\xf7\xaa\x28\xcc\xb1\xda\xbf\xe5\xda\x36\xda\x5c\x64\x8f\x14\x8f\x60\xe9\x11\xe6\x9b\x55\xae\x83\x99\xda\xac\xa6\x45\xab\xc6\x22\xf2\xbe\x16\xa5\x28\xc4\x6c\xd5\x96\xe2\xe0\xef\x30\xbb\xdb\xee\x8d\x4f\x89\xaa\x26\xd6\x1d\x16\xce\x73\x0e\xe4\x62\xed\x98\x25\x1c\x73\x4d\x9f\x62\x44\x2f\xe1\x98\xf7\xa1\x14\xd1\x4b\x11\xbd\x3d\x29\x45\xf4\xb6\x53\x8a\xe8\xa5\x88\x5e\x24\xa5\x88\x5e\x8a\xe8\xf5\xa1\x14\xd1\x4b\x11\xbd\x14\xd1\x4b\x11\xbd\x78\x8e\x29\xa2\x97\x22\x7a\x9f\x55\x44\x8f\x24\x1c\x73\xc2\x31\x3f\x44\xc9\xeb\x91\xbc\x1e\x7b\x52\xf2\x7a\x6c\xa7\xe4\xf5\x48\x5e\x8f\x48\x4a\x5e\x8f\xe4\xf5\xe8\x43\xc9\xeb\x91\xbc\x1e\xc9\xeb\x91\xbc\x1e\xf1\x1c\x93\xd7\x23\x79\x3d\x3e\x03\xaf\x47\x29\xf2\x41\x4b\x73\x97\x22\x1f\xac\x32\xb7\xc5\x28\x66\x62\x54\x88\xac\xee\x4f\x69\xbe\xc0\x62\x8e\x7b\xf2\x54\x74\x61\xc1\xa2\xc7\xe4\x9f\x82\x83\x2d\x97\x6b\xce\x05\x82\x2b\x85\x9e\xf7\x06\x0f\x97\x22\x3f\x54\x47\x3d\x4b\x62\xa6\xaa\xe1\x3d\x28\x55\x0d\x4f\x55\xc3\x53\xd5\xf0\x54\x35\xfc\xf1\xab\x86\xcf\xa9\xea\xdb\xb7\xd6\x93\x35\xdb\x5a\x65\xc7\xa3\x73\x6d\x5a\x97\xf2\x35\xc8\xc5\xff\xda\xa8\x21\x1e\x28\xa8\xad\xaa\xe3\x9f\x51\x0d\x71\xa3\xbc\x9c\x62\x30\x12\x43\xf7\x6a\x04\xb6\x49\x4e\x2f\xda\x7d\xcc\x5d\x2e\x16\xe4\x97\xdd\xbd\x08\x62\x6d\x23\x2a\xb8\xe4\x34\xcf\x21\x27\x25\xc8\x91\x55\x41\x82\x4c\x19\x0f\x33\x9b\x37\x77\xcf\xcb\x48\x88\x78\x3c\x69\x8d\xee\xe8\x35\x7e\xca\x42\xdd\xdd\xc1\x0f\x82\xf9\x6f\xa7\x56\x84\x25\x4d\x59\xaa\x35\xfe\x7a\xd9\xee\x60\x8e\x75\x07\x83\x67\x56\xb6\x3b\x36\x52\x33\x22\xda\x25\x18\x7c\x1b\x1c\xab\x19\x22\xbc\x82\x21\x10\x9f\x42\x30\x64\xe4\xd9\xf0\x25\xff\xa8\x40\xc6\xbd\x59\xc5\x12\x64\xe3\x72\xf7\x06\x4e\xa4\x2b\x9b\xb9\x76\xb6\x19\x55\xd6\xce\x8f\x0f\x3e\x47\x07\xff\x86\x8a\x95\x0d\x99\x43\x41\xd6\xf7\x75\x9d\x79\xbc\x3b\x42\x11\x5a\xa3\x14\x86\x09\xc7\x6d\x85\x2a\xc4\x3b\x61\xd6\xf3\x8d\xcc\xed\x1a\xcd\xd4\xe6\x1b\xc5\x06\xc6\x07\x73\x0f\x0d\xe4\x1c\xda\xa6\x0a\x06\xf0\x38\x6d\xdb\xd8\x61\x1c\xeb\xb4\x95\xa7\x16\xf6\x20\x5d\xa7\x81\x01\x15\xe4\xb1\x40\x15\x64\x0b\xb0\x62\xa0\x80\xf8\x50\xe0\x0a\x32\x24\xc0\x82\x0c\x06\xb2\x20\x83\x00\x2d\xc8\x70\x60\x0b\x32\x28\xe0\x82\x0c\x05\xba\x20\xeb\x0a\x61\x88\x25\xb3\xe4\x7c\x3e\x43\xe9\x17\xb2\x86\xe4\x18\x88\x65\xad\x5a\x1c\x9a\x63\x20\xb6\x43\x60\x42\xc8\xb0\x41\x73\x32\x28\x36\x84\xac\x0b\xce\x80\x2a\x8f\xa0\x1a\xf0\x70\x13\x42\x87\x93\x9e\xe1\x21\x27\xa4\x0b\x3b\x19\x88\xa3\xd3\xcc\x16\x7a\x32\x10\xcf\x1a\xc0\x32\x88\x6d\x64\x69\x03\xc4\x32\x10\x5f\x73\x3b\x75\xb0\x2c\xcf\xf1\x30\x0d\x85\x42\x21\xeb\x47\x69\xb0\x40\x37\x71\x06\xd4\x70\xc8\x16\xf2\x48\xe8\x16\xf2\x28\xa8\x0c\xf2\x28\x28\x17\xf2\x38\x90\x0c\xf2\x58\x68\x17\xf2\x98\x6b\x5b\xa3\x5e\x06\x62\x39\x34\x76\x86\x3c\xfe\x66\x0d\xba\x51\xd7\x73\xa6\x6a\x20\xc5\x70\x77\x29\xc2\x31\x1c\x98\x62\x28\x1d\x3d\x28\x24\x83\x0c\x0d\xcb\x20\x03\x43\x33\xc8\x90\xf0\x0c\x32\xe4\x6d\x84\x6e\x9f\x77\x58\x9c\x62\x78\x77\x92\xe5\x3b\x8c\x27\x69\x41\x4b\x73\x79\xfc\xb7\x79\x29\xe3\x79\xfc\x9f\xf8\x27\x15\x65\x52\x8d\xc9\xa9\xc7\x5f\x0d\xc9\xdb\xd5\xb2\x1b\x72\x19\xcc\x0a\x30\x45\xcc\x13\x77\x49\x8b\x21\x3c\x24\xb6\x5e\x97\x8b\xaa\x9a\xd5\x5d\x77\x02\xc6\x2b\xd1\xdb\xb9\x50\x98\xcf\xe1\x22\xaf\x03\xe8\xa5\x17\x37\xb0\x7a\x71\x3c\xec\x95\xc4\x14\x79\x71\xce\x5f\x58\x1f\xcc\x80\x78\x41\xab\x8c\x6b\x97\x91\xe0\x41\x85\xcb\xba\xf4\x02\xc7\xf6\x62\xc0\x3a\x49\xcd\x68\x61\x48\xbf\xe5\x20\x4e\x1d\x9a\xe7\xcc\xa8\x14\x5a\x5c\x0e\xe8\x01
\x19\x44\x7f\x72\xba\x00\x55\xd2\x2c\x6e\x40\x1d\xb5\xd9\xb0\x8c\x9a\xa2\x87\x1e\x29\x87\x28\x18\x88\x6d\xed\x92\xb9\x1a\xd2\xf1\xa1\x05\x39\xac\xbb\x6c\xcf\xcc\x39\xd1\x47\x21\xdd\xd1\x1b\xea\x54\x39\xb3\x31\x88\x05\xd0\xc8\x98\xc6\x0b\x8f\x19\x3c\x50\xcd\x7a\x86\xc0\x46\x3c\x3d\x9b\xd4\xc6\x41\x0e\x43\x2b\xe6\x39\xd8\x69\xb8\xf6\x28\x4d\x35\x17\x55\x11\xe7\xcb\x98\x40\x03\xff\xcc\xc9\xa1\x0f\x67\x87\x00\x1c\x1a\x12\x12\x8b\x53\x76\x18\x73\xcd\x46\xc3\x70\xaf\x03\xe4\x88\x57\xf4\x25\x78\x07\x3e\xc1\x41\x00\xd8\x2e\x35\x95\x5f\x1b\x3d\x13\x67\x39\xdc\xce\x41\x76\xf6\x8b\x29\x92\xc3\x94\xf1\x48\x58\x38\x55\x44\x56\x9c\x33\x3e\x23\x82\x7b\x44\x24\x9a\x28\x51\x6c\xf1\x42\xf6\xa8\x1f\x1b\x1e\x0f\x84\x56\x35\x84\xee\xea\xe6\x4c\x45\x17\x58\x24\x3e\x1c\x24\xa6\x84\x72\x9b\xa1\x6d\x16\x01\x2f\x88\xb8\x45\xe5\x2b\x3f\x77\xeb\x10\x87\x3c\x14\xff\xd6\x10\xab\x77\x6a\x4c\xce\x50\x8d\xc7\x83\x2a\x6a\xce\xe6\xcc\xd2\xa2\x10\xb7\x71\x96\xce\x73\xaa\xf7\x7d\xfb\xec\xeb\x7d\xaf\x81\x9b\x52\xb9\xef\x54\xee\xbb\x4b\xa9\xdc\xf7\x27\x58\xee\x1b\xd9\xd9\x2b\xcf\xd7\xfd\x0e\xe0\xd9\x54\x0a\xdf\x5e\xf7\x3b\x80\x65\x53\x29\x7c\x47\xdd\xef\x00\x9e\x28\x30\xe4\x87\x39\xa0\xe6\x92\x80\xc2\xbe\xa8\x0a\xcd\xca\x26\x7d\x2f\xc8\xf4\x41\xc3\xc1\x3a\xc3\xa7\x2e\x2d\x25\x56\xeb\x6a\x61\xf3\x13\xd6\xf4\x2e\x8e\x19\x13\x02\x15\x5e\xd5\x21\xa3\x65\x63\x18\x63\x9e\x83\x2d\x8f\xed\x7d\xd9\x36\x87\x86\x7d\x9a\xd0\xef\xb7\x68\x5e\xaa\x06\x90\x18\x6c\xc2\x1c\x1a\x2b\xb8\x30\x47\x59\x28\xe8\xde\xa4\x1d\xd3\x3b\x88\xb9\x8d\x40\x2f\xc1\x27\x1f\xcc\xd8\x12\x78\x63\x77\x1f\xaa\xa3\xb0\x37\x87\x43\x46\x74\xde\x5b\xdd\x37\x53\xd8\x5a\xd4\x2f\xa1\x47\x7a\x2b\x19\x6b\xe5\x78\xf0\xf7\xc2\xee\x77\x82\xb5\xf5\x83\x98\x6e\xbc\x0f\xd0\xc6\xff\x8f\x96\x5d\xfb\xff\x04\xf1\x6d\xd2\x45\x86\xb4\xee\xad\x66\xaf\xed\xfa\xfa\x48\xb4\x2c\xf3\x10\xa5\xf4\x94\xb9\x01\x71\x2f\x88\x58\x00\xd6\x20\x70\xec\xdd\x50\x6c\x84\x53\x07\x9b\xd1\x5b\x60\xd8\x1e\x4a\x1d\xcc\x33\x1a\x82\xfd\xb9\x97\xae\x1f\x14\x72\xbd\x09\xb7\x1e\xa6\x5c\x51\x07\x6a\x3d\x6c\xc8\x61\x90\x70\xc3\xb3\xf1\xa2\x6e\x3d\x9a\xa9\x42\x5c\xaa\x10\xb7\x2f\xa5\x0a\x71\x3b\x68\xf8\x0a\x71\xc6\xf0\x1a\x04\x21\xd3\x18\x74\x1e\x5e\x3c\x10\xd3\x47\x80\x16\xff\x6b\x94\x9a\x1b\x10\xa5\xd6\x86\x11\x5b\x00\xf0\x20\x4c\x1b\x08\x71\x98\x67\x63\x9d\x3e\xdd\xaa\x75\x43\xf0\x1d\x1c\xec\xfb\xdc\x4b\xcd\x0d\x06\xf0\x6d\x81\x7b\x1b\x58\xee\x00\x7c\x3d\xfe\x3c\x15\x9d\x4b\x45\xe7\x1e\xad\xe8\x5c\xaa\x3a\x16\x41\x9f\x53\xd5\xb1\x2e\x0d\x04\x67\x7d\x14\x28\xeb\x63\xc0\x58\x1f\x0b\xc2\xba\x09\x5f\x35\x23\x8f\x9d\x7e\x03\x5d\xed\x42\x4f\xe3\x30\x38\x5b\x60\xab\x03\x60\x3a\xda\x90\xd5\x16\xe4\x34\x6e\xa8\x4d\x81\xcb\x06\x6e\x1a\x8d\xe7\x69\x2b\xda\xfa\x31\x1f\x37\x4e\x5e\xac\x1e\x09\x66\x3a\xa0\xcf\x27\xfa\xc9\x3d\x3c\xb4\x34\x5a\x87\xc5\x43\x4a\x77\xc0\x49\x1b\x48\x68\x38\x30\x64\x0d\x4a\x3a\x50\x44\x8b\x74\xaa\xa1\x0f\x09\x05\xdd\x05\x03\xb5\x50\xce\x18\x20\xcb\x10\x10\xd0\xf8\x4b\x38\xf2\xea\x8d\x16\xd6\x01\x20\x9f\x8f\x02\xf7\xdc\x05\xf5\x8c\x49\x34\x7b\x24\x98\xe7\x76\x88\x67\xd4\x45\xb3\x0d\xde\x19\x85\x4a\xb1\xd4\x82\x76\x6e\x84\x5b\x83\x99\x36\x61\xda\xf5\x90\x6b\xc4\x46\x75\x43\xb5\x9d\xb0\x6b\xdc\x3e\x0d\x0d\xc9\x1c\x1e\x8e\xb9\x0b\x8a\x19\xe3\xec\x7c\x04\x18\xe6\x30\x10\xcc\x88\x32\xc5\x5c\xb3\xe1\x4b\x15\xb7\x35\x42\xcf\x29\xb5\xcb\x7e\xb6\xea\x15\xd3\xa5\x60\x39\x29\x2b\xad\xfb\xab\xe8\xb5\x0a\xc7\xdb\x6b\x16\xf7\xe4\xd9\xa9\x70\x9c\x6a\x16\x7b\xfa\x44\x6b\x16\xb7\xe5\x35\x15\x2e\x4e\x85\x8b\xff\x35\x0a\x17\xef\x92\x7a\x5f\xbd\x38\x80\xa7\xab\x77\x7c\x4f\xf5\xe2\xc0\x45\xf5\x88\xd2\x9d\xd5\x8b\x03\xf8\xfa\x7
a\xc7\x3b\xaa\x17\x07\x70\xf4\xf5\x8e\x53\xf5\xe2\x54\xbd\xf8\x01\x4a\xd5\x8b\xef\xa3\x54\xbd\x98\xa4\xea\xc5\xa9\x7a\xb1\xa5\x54\xbd\x38\x55\x2f\xbe\x97\x52\xf5\x62\x4b\xa9\x7a\xf1\xde\x94\xaa\x17\xa7\xea\xc5\xa9\x7a\x71\x0f\x2e\xa9\x7a\x71\x10\xa5\xea\xc5\x43\xf0\x49\xd5\x8b\x53\xf5\xe2\x61\x78\xa6\xea\xc5\xa9\x7a\xf1\x20\x94\xaa\x17\xa7\xea\xc5\x96\x52\xf5\xe2\x54\xbd\x38\x55\x2f\x0e\xa4\x54\xbd\x38\x55\x2f\x4e\xd5\x8b\x53\xf5\xe2\x54\xbd\x38\x66\x80\xa9\x7a\x71\x3f\x4a\xd5\x8b\x53\xf5\xe2\x9d\x4c\x52\xf5\xe2\x54\xbd\x38\x55\x2f\x4e\xd5\x8b\x53\xf5\xe2\xbd\x29\x55\x2f\x4e\xd5\x8b\x7f\xf5\xea\xc5\x11\x49\x16\x64\xad\x9a\xd9\xae\x12\xc6\x01\x6c\x5d\xd1\xe3\x5d\x25\x8c\x03\x38\xfa\x6a\xb5\xbb\x4b\x18\x07\x32\xb5\x35\x6c\xb7\x2d\xe7\x63\x55\x84\xf6\x75\x8c\x03\x58\xd6\x95\x8f\xb7\xd7\x31\x0e\xe0\x58\x57\x3e\xde\x59\xc7\x38\x80\xa9\xaf\x7c\xbc\xbb\x8e\x71\xc8\x66\x61\xe5\xe3\x9d\x75\x8c\x03\x38\xfa\xca\xc7\xf7\xd7\x31\x0e\x59\x54\x5f\xf9\x38\xd5\x31\x7e\x72\x10\x78\xaa\x63\x9c\xea\x18\xd7\x94\xea\x18\xa7\x3a\xc6\xeb\x94\xea\x18\xf7\xa4\x54\xc7\x78\x93\x52\x1d\xe3\x16\xa5\x3a\xc6\xfb\x52\xaa\x63\x9c\xea\x18\xef\x49\xa9\x8e\xf1\x76\x4a\x75\x8c\x53\x1d\xe3\x9e\x94\xea\x18\xa7\x3a\xc6\x51\x94\xea\x18\xa7\x3a\xc6\xa9\x8e\x71\xaa\x63\x3c\xc4\xf4\x53\x1d\xe3\x7d\x28\xd5\x31\x4e\x75\x8c\x53\x1d\xe3\x54\xc7\x78\xbf\xb1\xa4\x3a\xc6\x7d\x28\xd5\x31\xb6\x94\xea\x18\x7b\x06\xa9\x8e\x71\xaa\x63\x9c\xea\x18\x87\x1f\xc0\x54\xc7\xf8\x57\xae\x63\x4c\x2b\x2d\x16\xa2\xe2\xfa\x0a\xe4\x92\x65\x70\x9a\x65\xe6\xa7\x6b\x71\x03\x3d\x80\x76\xdd\x48\xcf\x3d\x2c\x7b\x4c\x8c\xf1\x9c\x65\x18\xdd\xb8\x9d\x03\x96\x09\xa6\x44\x59\x8e\x84\x5a\x96\x44\xf7\xe4\xd9\xa0\x38\x70\xde\x54\xb3\x0c\xf1\x50\x38\xdc\x3e\xeb\x6e\x57\x7a\x22\x44\x01\x74\xdf\x01\x38\x43\x14\x64\x8f\x1b\xaa\xb3\xb0\xef\x5c\x2c\xb3\x61\x44\x26\x50\x08\x3e\xeb\x77\x5d\x39\x9c\x4c\x29\xf2\x31\x79\xd3\xb0\xca\x28\x47\x95\x5e\x49\x09\x5c\xf7\xca\xcf\x9a\xf8\x22\x8c\x58\xb9\x68\x21\x96\x90\xa3\x91\x8c\x58\x30\xeb\xa4\xa0\x9a\x14\x40\x55\x1f\xe3\x54\x70\x68\xa6\x6a\xd4\x36\x25\x97\x38\x66\x3b\xd0\x09\x38\xb4\x5c\xef\x6d\xeb\x67\x61\xf4\xb6\x29\xd6\xc2\x9e\xee\x1d\x8a\x06\x5d\x86\x8e\xee\x66\x56\xbd\xce\x39\x2a\xc7\x95\xa8\xc8\x2d\xb5\xcf\x45\x59\x71\x54\xc3\xb8\x34\x66\x3b\xfb\x08\x41\xd8\x83\x21\x24\x22\x37\xc2\x9b\xb2\xc7\x07\x42\xe3\x64\x54\xce\x7a\x1b\x7f\x9d\xcd\x3a\x38\x95\xb3\xca\x22\x7a\xdd\x21\x01\xae\xe5\x0a\xc1\xa2\xfd\x75\xb2\x79\x25\xe6\x22\xbb\x31\xc2\xbb\xa0\x33\x38\x38\x50\xe4\xcd\xfb\xb7\x46\xd7\x57\x0a\x72\xc2\xfa\x17\x3a\x41\x38\x9a\xbb\x2b\x4a\x29\x96\x2c\x37\x27\xed\x7b\x2a\x19\x9d\x14\xe6\x4d\x3a\x05\x09\x3c\x24\xbd\xed\x8b\xc3\xef\x4f\x3f\xfe\x7c\x71\xfa\xfe\xec\x08\x5f\xa4\x70\x57\x52\x6e\x8e\x74\xa5\x42\xcd\xaf\x5a\xca\xcd\xc4\x81\x2f\x99\x14\xdc\x2c\x2e\xfa\xc1\x29\x59\xba\x51\xf7\xe7\x5b\x9f\x7f\x09\x4a\x14\x4b\xc8\x2d\x5a\xb4\x9e\x7d\x6f\x8e\xce\x24\x64\xbc\xac\xb4\x2f\xcb\x81\x70\x69\xa3\x63\x78\x36\xa7\x7c\x16\x72\x29\x1b\x01\x68\xaf\xab\x5a\x71\x4d\xef\xcc\xf8\xd1\x6d\xab\x32\x5a\x06\x58\x8a\x68\x78\x51\x92\x8b\xca\x6c\xf9\x17\x5f\x1c\x13\x06\xaf\xc9\x17\xad\x2f\xea\x3f\xd2\x33\x3b\x96\x96\x00\x39\xb8\x38\x2c\x41\xe2\x60\x9d\x38\xf4\x77\x85\x4b\x98\x51\x99\x17\xa0\x10\xc8\xeb\xaf\x72\xeb\xa5\x09\x14\x00\xa8\xbd\xf4\x5c\xe8\xa8\xfb\xc0\xd3\x7b\x81\x10\xe0\xa9\x78\x4d\xe6\x5a\x97\xea\xf5\xc9\xc9\x4d\x35\x01\xc9\x41\x83\x1a\x33\x71\x92\x8b\x4c\x9d\x68\xaa\x6e\xd4\x09\xe3\x46\x67\x8e\x72\xaa\xe9\xa8\xa5\xd4\x4f\xac\xd9\x3e\xca\xc4\x62\x41\x79\x3e\xa2\x4e\x95\x8c\xea\x63\x70\xf2\x5b\x67\x38\x8e\x68\xfd\x57\x8c\x8f\xe8\x48\xcd\xa1\x28\x0e\x
7a\x0e\x3a\xfc\xb1\x1c\xf8\x48\x0e\x7e\x1c\xbb\xb9\xc6\x69\xe6\xb3\x5a\x11\xdb\x39\x8f\xc9\x45\x00\x16\xde\x62\xf6\x21\x6f\xae\x4d\x5c\xfb\x71\x4b\x57\xf7\x57\x1e\x5e\xb7\x9f\x5d\x5c\x7f\xfc\xdb\xe5\x87\xf3\x8b\xeb\x96\x8a\x0f\x03\x0c\x26\x15\x9f\x54\x7c\x52\xf1\x49\xc5\x7f\x2a\x2a\x1e\xf8\x32\x4a\xbd\xfb\x97\x6d\x4b\x85\xd4\xb2\xd3\x5f\xd1\x69\x81\xf8\x1f\x77\xe4\xeb\xcd\x69\x89\x51\x6f\x96\x81\x62\xf7\xab\xef\x60\x67\x55\xcf\xf8\xf2\x7b\x2a\x3b\xa0\xad\x3e\xce\x12\x4f\xdb\x36\x85\x38\x96\x84\x85\x70\xa4\x8d\xdf\x21\x34\x31\x2a\x30\xd4\x15\x8e\x28\xed\xfd\x8a\xb5\x14\x87\xf9\x34\x5f\x19\x16\x48\xe8\x08\xc2\x05\x5d\xd4\x5e\xdf\xd6\x6e\x06\x7a\x19\xbd\x0c\x8c\xc9\x7b\xef\xdc\x21\x6f\x7e\x3e\x7f\x7b\x76\x71\x7d\xfe\xf5\xf9\xd9\xc7\x30\xef\x65\x64\xec\x05\x3d\xdb\x03\x2c\xd5\xc1\x10\x96\x96\xa5\xfb\xed\xad\x50\x17\xef\x1c\xcf\xde\x92\x89\xaa\x89\x14\x0c\xb7\xa7\x6a\x43\x6b\x46\x04\xe4\x29\x5f\xd5\x1e\xdb\xad\x9a\x3d\xd4\xcf\xdd\x31\x2e\xb7\x19\x8b\x11\x8b\x5b\x6f\xfb\x36\x93\x31\x90\xef\xa6\xa1\xb9\x6e\x38\x06\x32\xde\x6a\x6e\x3a\xf3\x31\x34\xfe\x38\xa4\xd1\x69\xe9\x5e\xd3\x33\x90\x67\xdb\x60\xdd\x6e\x80\x46\xc8\x40\x2d\x5a\x5d\x33\x34\x90\xe3\x5b\x98\xd2\xaa\xb0\x3e\xc5\x17\x2f\xc6\x7d\xad\x40\x37\xac\x01\xd4\xe3\xd7\x52\x04\xf6\xd2\xe9\xa8\xc8\x2b\x4c\x87\xc3\xee\x6f\xc3\x5e\x28\x07\x2e\xe1\x26\xc6\x50\xb3\xe4\x1f\xdf\x36\xde\xe9\x5e\xd3\x11\xd0\xd3\x68\x78\x4d\x7c\xea\x47\x26\xf8\x94\xcd\xde\xd3\xf2\x5b\x58\x7d\x84\x69\x38\xc8\xa1\xbb\x97\x18\x13\x75\x68\xfe\x08\xa0\x8f\x98\x5a\xa3\xce\x0e\x31\x1c\xc9\x34\x08\x8e\x29\x3e\x6d\x28\x2e\x65\x68\x98\x3c\x9f\xe8\x1c\x9f\x8d\x8e\x6e\x36\x5a\x1f\xc5\xd3\xd7\xfa\x1f\x22\x41\x2c\x1a\x5e\x1f\x6e\x1d\x7b\xea\x9a\x7e\xce\x4c\x8e\x5c\xa0\x96\x11\xa1\xc7\x2d\xb7\x43\x24\xdb\x7b\x9c\x16\x99\xe0\x19\x94\x5a\x9d\x88\xa5\xb1\xb6\xe0\xf6\xe4\x56\xc8\x1b\xc6\x67\x23\x63\x09\x8c\xec\x69\x52\x27\x88\x57\x39\xf9\x2d\xfe\x4f\xe4\x60\xae\x3f\xbc\xfd\xf0\x9a\x9c\xe6\xb9\x6b\xe0\x5b\x29\x98\x56\xb1\x0d\x2a\x6c\x4b\xbf\x31\xa1\x25\xfb\x1e\xa4\x62\x22\x3a\x9f\xe1\x86\xf1\xfc\x98\x54\x2c\xff\x2a\xec\xd2\xf5\x34\x90\xbc\x8a\xd2\x62\x2b\x07\x94\xd9\x2b\x44\x31\xad\x22\x6d\x1e\x4f\x46\x74\x6b\x25\x6e\xac\x1e\x16\x9d\x91\x64\x74\x8e\xc7\x00\x0c\x51\xb9\x2e\x04\x78\xb1\x4e\x28\x68\xc3\xdd\xa1\x07\xcd\x25\x1a\x5a\xf5\xc9\x93\x7b\xa3\x97\x22\x7f\x4d\x54\x55\x96\x42\x6a\x45\x16\xa0\x69\x4e\x35\x1d\x9b\x93\x1b\x73\x24\x3a\x8c\x10\xb9\x76\xdc\xfc\x0e\xd1\x60\x51\xa9\xce\x35\x2b\x34\xde\x6c\x4a\xdf\x31\xe2\xec\xc6\x5c\xe4\x70\x11\x39\x7a\x64\xa4\x3a\x90\xa2\x68\x96\x9a\xea\x4a\x8d\xe7\x42\xe9\xf3\xcb\x63\xff\x63\x29\xf2\xf3\xcb\x01\xd8\x22\x1f\x15\x68\xef\x93\xe7\x64\x0c\xa1\x58\x5f\xd2\x88\x4a\x8e\xc3\x98\x44\xcd\xcd\x30\xa0\x0a\x75\x1c\x87\xb9\xf8\x55\x36\x87\x05\xc5\xff\xfc\x3a\x7a\xd1\x2c\x31\x45\x6e\x25\xd3\x1a\x38\x7a\x24\x40\x2e\x62\x75\xb2\x98\x1e\x1b\x5d\xdc\x3c\x4a\x97\xaf\x5e\x3c\x0b\x83\xae\x96\xb3\x01\xb7\xd7\xb0\x73\x7a\x75\x08\xfb\xc4\x06\x17\x8c\xbe\x1f\xa2\x62\x6c\xbb\xd4\xdd\xe9\xe5\x39\x59\x5a\x59\x7c\xf2\xcd\xf0\x35\x6f\xbe\x7e\xb4\x9b\x32\xa2\x99\xbb\x27\x77\x59\xd6\x9e\xca\xd7\x36\x4b\xa7\xae\xd7\x13\xc1\xba\x60\x0b\x86\xe1\x92\xdc\xb7\x8e\x57\xe4\xd0\xfe\x72\x9c\x95\x55\xcc\xe5\xe0\xb8\x2c\x60\x21\xe4\xea\xd8\xff\x08\xe5\x1c\x16\x20\x69\x31\x52\x5a\x48\x3a\x8b\xba\xd6\xfc\x90\x71\xa8\xcd\x4f\xf6\x2b\x63\xd4\x6f\x6b\x39\x36\x47\x1c\x53\xe3\x99\x4a\x68\x80\xa7\xde\xf2\x81\xfc\x73\xb8\x3b\xa3\x05\x7d\x98\xab\xb3\x3e\x25\x17\x03\x3f\x9a\xdf\x04\x61\x59\x37\x09\x1f\xf3\xf5\x8a\xa3\x77\x71\x29\x8a\x6a\x11\x5f\x7e\xc7\x3f\xbb\x90\x27\xf0\x25\x59\x52\xa9\x9e\xc5\x83\
x30\x67\x4b\xa6\x62\xeb\x7d\x6c\x79\x0f\xc6\x57\x47\xc1\xd4\xfa\x4a\x97\x95\x76\x85\x7d\x87\x31\x8f\xe0\xae\x14\x0a\x23\x01\x83\x34\xb9\x26\x5d\x33\xe6\x55\x4c\x17\x02\x42\x4a\xaa\x35\x48\xfe\x9a\xfc\xd7\xe1\xdf\xbf\xfc\x65\x74\xf4\xd5\xe1\xe1\x8f\x2f\x47\x7f\xf9\xe9\xcb\xc3\xbf\x8f\xf1\x3f\xfe\xfd\xe8\xab\xa3\x5f\xfc\x0f\x5f\x1e\x1d\x1d\x1e\xfe\xf8\xed\xfb\x6f\xae\x2f\xcf\x7e\x62\x47\xbf\xfc\xc8\xab\xc5\x8d\xfd\xe9\x97\xc3\x1f\xe1\xec\xa7\x3d\x99\x1c\x1d\x7d\xf5\x45\xd4\xb0\x29\x5f\x7d\x88\xb8\xa0\x89\x2d\x04\x38\x40\x05\xe8\x2e\xaf\x01\x8a\x67\xdc\x8d\x1a\x67\xd7\x88\x71\x3d\x12\x72\x64\xd9\xbe\x26\x5a\x06\xd6\x9c\xb4\xe4\x05\x70\x48\x4d\xf8\x71\xa0\xa6\xbb\x7e\x6c\x8d\xa9\xf9\xe4\xca\x4a\x41\x26\x41\x3f\xdf\xc8\x83\x1d\x9f\x8f\xd8\x62\x3a\x6a\xa4\x5f\x19\xdd\x23\x9f\xbe\xf5\xf1\xb9\x86\x31\x86\x79\xac\x5b\xa9\xa9\x8f\x59\xec\xfb\x50\x8a\xc5\x98\xb4\x40\x31\xcb\x01\x1a\xcf\xba\x31\xde\x40\x64\xd1\x9c\x14\x72\xe9\x4f\x29\xe4\xd2\x9f\x52\xc8\xa5\x1f\x19\xd1\xbd\xb2\x67\xdc\xc6\x5b\x06\xa8\xe3\xf9\x3c\xe2\x2d\xc0\x97\x21\x80\x93\xad\xb8\x60\xf7\x5a\x30\xaa\xba\x14\x65\x55\x50\x1d\x00\x29\xdf\x86\x40\xdb\x04\x09\xf7\xe6\xeb\xae\xa4\x06\x8d\xd7\xe4\x96\x58\x33\xce\x6c\x47\x6f\xae\x9b\xa8\x4a\x72\x5a\x14\x84\x71\xbc\x54\xf0\x0b\x7b\xf3\xf4\x30\x34\x09\xd6\xbd\x42\x28\xd6\xbb\xc3\xbe\x10\x46\x9c\xfb\xc7\xd3\xba\x48\x41\xa6\x88\xd2\x54\x6a\xcc\x8d\xc7\x9e\x10\xfd\x41\x68\xe6\x72\x77\xa8\x2b\xc6\xeb\x4e\x12\xa4\x2e\xc1\x1d\xe2\x71\xb5\x28\xa0\xa6\x25\x50\x53\x63\xa2\xa0\x4a\x93\x40\xff\x0c\xae\xa6\xa6\x37\x88\xc7\xcc\x20\x07\x9e\x01\x36\x9b\xaf\x20\xbc\xd4\xfe\x64\x65\x76\xe4\x8c\x2f\xeb\x1c\x90\xca\xa6\x1f\x58\xb3\xc7\x7c\x67\xff\x4d\x5a\x1f\x63\x74\x36\xc5\x93\xc3\xda\x8d\x62\x71\x30\xb4\x06\xdd\x1e\xa0\xe7\xd0\x0a\xb4\x7c\xdc\x3b\x02\xd5\x4d\x1d\x03\x0f\xc8\x3d\x88\xb1\xfc\xe3\x2c\xee\x1a\x21\x16\xfc\x48\xdb\x30\xb5\x1b\x30\x40\x30\x6e\xc8\x05\x4b\x8c\x71\xfc\xa9\x42\xef\xe2\x8c\xde\xad\x06\x6f\x64\xe4\x68\x9b\xb1\xfb\x7c\xcc\xd5\xed\xa6\xaa\x37\x36\x07\x89\xff\x1e\x0f\x62\x68\x0e\x60\x64\xc6\x1b\x98\x8f\x64\x5c\x76\xb1\x3c\x43\x99\x84\xb1\xf0\x9b\x52\xc2\x94\xdd\x0d\xa0\x9d\x4e\x79\xe3\xd7\x67\x39\x70\xcd\xa6\x2c\x1c\xf3\x2d\xcc\xc0\x4a\xe0\x79\xdd\x23\x0a\xeb\xa1\xc7\x04\x78\x1b\x30\xec\x73\x4a\x93\xb1\xee\x84\xe1\x2e\x88\xab\x61\x5c\x28\xe9\x76\x48\xb7\x43\xba\x1d\xb6\xd1\x23\xde\x0e\xee\xec\x3e\xfd\xd5\x80\x15\x14\xe2\xca\x43\xbc\x6d\xd5\xd9\xc1\x13\x69\x65\xbf\xf7\x64\xf6\x3f\x2b\x4d\x11\xae\x13\xfc\xd6\xfe\x47\xe2\xba\x69\x6c\xc9\x54\x73\x9b\x69\x61\xcb\xc0\x91\x39\x9b\x85\x6c\x72\x01\x4b\x28\xdc\x63\x80\x2c\x28\xa7\x33\xdb\x25\x47\x0b\x1f\x2d\xed\xcd\x52\x48\xec\xa5\x25\x59\xde\x79\xee\xe3\xb4\xcd\x2d\x69\xd4\x45\x21\x68\x7f\xd9\x31\xcc\xa4\x28\x0a\x90\x8a\x14\xec\x06\xc8\x5b\x28\x0b\xb1\x5a\xb8\x64\xe5\x9c\x5c\x69\xaa\x8d\x7a\xb8\x02\xdd\x1f\x49\x19\x7c\x80\x71\x66\x97\x55\x51\x5c\x8a\x82\x65\xbd\xe3\x17\x5d\xd1\x3c\x47\x99\x2c\xab\xa2\x20\x25\xb2\x1b\x93\x0f\xbc\xbf\x64\x8a\x29\x39\x2d\x6e\xe9\x4a\x1d\x93\x0b\x58\x82\x3c\x26\xe7\xd3\x0b\xa1\x2f\xed\xe3\x77\x5c\x67\x99\xf5\x7f\xb1\x0a\xc7\x98\xb0\x29\x79\x8d\xfd\x84\x34\xd1\x74\x86\xee\x1c\x8f\x4e\xeb\xef\x16\x16\xb2\x33\x40\xab\xec\x6f\x99\xaa\xfd\x0f\x21\x2e\x31\xef\xaf\xe8\x7b\xb1\x6d\x1c\xd6\xdf\x22\x27\x73\xc5\xd9\x9f\x7f\x35\xd1\x2a\xd8\x14\xb2\x55\x56\xc4\xe9\xbb\xd3\x0c\xd1\xd5\xbe\x47\x25\xb4\xce\x79\xef\x65\x75\x5d\x66\x5d\x69\x44\x74\x16\x31\x4e\x6c\xa3\xd5\xb0\x6e\xbb\x8d\x8a\xa8\x67\x6b\x9d\x8c\x6a\x88\xbd\x0f\xda\xa9\x20\x0b\x31\xc6\x36\x2c\x85\xd2\x57\x9a\xca\xa0\xae\xe6\x5d\x05\x72\xe9\x59\x99\x13\x99\xd1\xa2\x08\xbc\xa4
\xd9\x62\x01\x39\xa3\x1a\x8a\x15\xa1\x53\x8d\x05\x35\xc3\x2a\x11\xd6\x1c\x15\xc9\x24\xd8\x13\xe9\xfa\x71\xcc\x29\xcf\x0b\x90\x64\x4a\x59\x68\x36\xc1\x86\x53\x59\x83\x5c\x30\x8e\x9e\xdb\xd0\x64\x74\x09\xe8\x99\x36\x1c\xb2\x4c\xc8\x1c\x0b\x1e\x09\x8c\xb7\xb8\x7f\x0a\x7b\x3d\x78\x85\x8e\x96\x6c\xeb\xb2\x8d\xb0\xe1\x9b\xa9\x4f\x0a\x91\xdd\x28\x52\x71\xcd\x0a\xbb\xb8\x42\xdc\x04\xf2\x5c\x94\x05\xaa\xc6\x08\xcd\x59\xff\xe7\xa8\x3e\xd6\x23\x33\x22\x75\xf2\xdb\xe6\x9f\xf0\x17\x21\x36\x77\xe4\x4b\x2e\xf6\x1d\x07\x77\x90\x0d\x52\xb2\xfb\x03\x07\xb4\x5c\x10\x8b\x2c\x02\x2e\x7a\x4f\x0e\xde\x3c\x15\xc6\x18\x34\xf2\x5a\x17\xaf\x0d\x66\x59\x5f\xe7\x63\x72\x76\x07\xd9\x00\x15\xf0\xb1\xf5\x3b\x5e\x46\x58\x31\x93\xde\xc0\x27\xde\xda\x34\xb0\x58\x5d\x9b\x3a\xe2\xf0\xc6\xf2\x8b\xed\xad\x67\x75\xa2\x65\x55\x30\x8e\xd8\x30\x57\xcc\x2e\x8a\x2f\xe3\xca\x18\xf5\x1d\x95\x1b\x07\x01\x30\xac\x9c\xfb\x80\xe4\x4c\x62\xcd\xf5\xb8\x80\xb6\x2f\x34\xe0\xe7\x8f\xcd\x89\x44\x50\x27\xfc\x86\x0e\x0f\x4e\x0e\x8e\x36\xa2\xbf\x91\xfd\xec\xa6\xac\x00\x6b\x4e\xd9\x02\x1f\x6e\xc4\x71\x3b\xa4\x88\x62\x8b\xb2\x58\xe1\x7e\x1f\xe4\xc7\x84\xe9\x58\x69\xe2\x42\x63\xf1\x60\xb7\xfb\xae\x0a\x62\xdc\xbe\x2b\x41\xb4\xa4\xbe\x7d\x88\xe5\x68\xbe\x40\xcb\xca\x1a\xab\x91\xdb\xf5\xcb\xc1\x31\x01\x9d\x1d\x91\x5b\xc1\x0f\x34\x8a\x58\x1c\x22\xea\x5a\x90\x4a\x35\x93\xc7\xca\xca\x1c\x62\xe1\x12\xe6\x58\x96\x05\xcb\x98\x2e\x56\x68\xac\x11\x51\xc5\x49\x2a\x56\x22\xa6\xda\x57\xaa\x3c\xbb\x63\xda\x65\x18\x46\xb1\x15\x53\xf2\x12\x8d\x2b\x6b\xc0\x11\x6a\x5e\xe0\x4b\x38\x99\x03\x2d\xf4\x3c\xee\xc4\x9a\x53\xca\x05\x1f\xfd\x13\xa4\xc0\x5a\x98\xdc\x71\xfd\x2c\xda\x1a\x47\xfb\x05\x8d\xdd\xf5\x0d\x04\xbd\x0c\xc8\xfa\xfd\xf2\xd7\xeb\xeb\xcb\x6f\x40\x0f\x74\x95\x9b\x91\xf9\x14\x1f\x8c\x8a\x80\x9c\x0a\xb9\x78\xc2\x3b\x3d\x16\x04\x3b\x22\xa5\x08\x34\xee\x87\x31\x28\xe6\x42\x05\xef\x34\xd9\xd8\x6d\xa1\x34\xba\x37\x63\xeb\x78\x64\x82\x73\xc8\xcc\x1e\x77\x52\x41\xa3\x6f\xfe\x52\xe4\xe4\xfc\x72\x4c\xfe\x26\x2a\xb3\x7a\x13\x3a\xe9\xd5\x48\x60\x93\x7c\xad\x7b\x05\x9a\xbc\x30\xd3\x7f\x11\x56\x72\xb0\x21\x23\xe3\x7f\x05\x9a\x83\x54\x78\x3f\x01\x1d\xa0\xf3\x56\x24\x50\xb1\x35\xa6\xe1\x2c\xcf\x4a\x69\xb1\x20\x73\xcb\x36\xf6\x0a\x6a\x15\xd4\x74\xfa\x21\xee\xf6\x35\x7a\xcb\xfa\xb9\xb1\xd3\xa4\xbd\x83\xdc\x58\x3f\x8b\x5b\x62\x43\x4b\xdb\xfd\x8d\xe2\xe9\xb9\x4e\x40\x11\x4a\xb2\xf6\x06\x47\xf2\xd5\x02\x5d\x6c\x58\x36\x8b\xdb\xcd\x31\xc7\x37\x12\xb5\xfc\xec\x1a\xc1\x07\x95\xf0\xec\xb2\x40\x30\x61\x14\x8f\xe1\xda\xc0\xc7\xa3\xf1\xc9\xb6\xa0\xfe\x20\x12\x45\xea\x8c\xf9\xe8\x45\x27\x8f\xd0\x83\xfa\xf9\x2f\x5b\xbc\xa0\x91\xa1\xd6\xad\x8c\x2c\xc9\xb0\x59\x90\x41\x0b\x42\xb3\x0c\x54\x6c\x57\x4e\xbc\x90\x50\x5d\x29\x90\xcb\x10\xf8\x76\x43\xc3\x2c\x95\x08\x73\xf9\x7b\xda\x52\xb7\x57\x12\x5e\x2d\x26\x91\x92\x55\x57\x16\x92\x7a\xd8\xc5\x6f\x95\xd7\xbe\x88\x1f\xa6\x07\x23\x78\x53\x83\xf2\x19\x90\x57\xb1\xc6\xcb\x9f\xfe\xf8\xc7\xdf\xff\x71\x6c\x97\xd3\x7d\x43\xe4\xfb\x96\x9c\x9f\x5e\x9c\xfe\x7c\xf5\xfd\x1b\xac\x8c\x1a\x85\x5a\x89\x4d\xdf\x1d\x2e\x79\x77\xb0\xd4\xdd\x47\x4b\xdc\xc5\xf2\x3a\x51\xba\xbb\x8b\xa7\x41\x76\x46\x42\xaa\xc8\xde\xc8\x53\x21\xfd\x9b\xca\x85\x96\x62\x4b\xc3\x98\x77\xe3\xb8\x53\x3b\xd6\xa8\xb9\x27\xd5\x6f\x3a\x2b\xaf\x44\x76\x33\x90\xe7\xe2\xe0\xfa\xcd\xa5\x65\x37\x80\xf3\x82\x72\x1f\x86\x60\x7c\x29\x8a\xa5\x6d\x37\x7f\xfd\xe6\x32\x4a\x91\x8f\x0d\x07\x8c\xba\xd9\x0a\xb2\x2b\x33\x56\x5f\x9c\x24\x98\xb3\x05\xc0\xb1\x45\x69\x5b\x7d\x63\x31\x1c\x5a\x30\xa5\x23\xda\xe2\x9b\x71\x36\xc1\x76\x33\xe2\x50\xdc\x5b\xf2\xd9\x0c\xec\xb3\x39\xf8\x30\x48\x0e\x62\xdb\xf5\xd3\x76\xdf\x44\x3e\x5
2\x1b\xf5\xd2\x72\xdf\xc4\xa4\x6e\x3e\x43\x2b\x0a\xcd\x12\xb4\xa3\x62\x5f\x22\xc9\x8a\x4a\x56\xd4\xbe\x9c\x9e\x9f\x15\x55\x4a\xb8\xd2\xa2\x1c\x00\x91\x64\x19\x0d\x88\x47\x9a\xc0\x54\x48\x18\x02\x90\xd4\x02\x0b\xe5\x15\x2a\x4b\xca\xb1\xb6\x9e\x73\x5b\x06\xf1\x15\x1d\x70\x8f\x4d\x98\x55\x55\x36\xf7\x51\x33\x0e\x4a\x9d\x20\x8c\xa8\x2a\xc3\x80\x44\x52\x4c\x00\x41\x53\x95\x84\x63\xb3\x53\xb0\xc0\xd5\x3e\x8e\x2b\x20\x66\x96\x13\xb8\x65\x04\x3a\xb3\x61\x69\x8f\xd1\x0a\x8c\x22\x63\x97\x60\xdc\x76\xdb\xdf\xae\x8d\xd6\xca\x24\x55\xa1\x0d\xc0\x85\x24\x70\xc7\xb4\xb2\x83\x94\x40\x95\xe0\x36\xf8\xef\xb6\x94\x89\x30\xcf\x3f\x53\xa4\xa4\x4a\x41\xee\xaf\x39\xb7\x00\xf8\x45\x41\x1c\x2f\x45\x7e\x70\xa0\xda\x03\x23\x33\x49\x33\x20\x25\x48\x26\xc2\x8e\x03\x56\xb4\xcd\xc5\x2d\x27\x13\x98\x31\xae\xfc\x99\x30\x03\x76\x07\x2e\x2c\xda\x28\xc4\x0d\x20\xc4\xdc\x77\x3f\x1c\x93\x8f\x75\xc3\x8c\xb0\x9d\x9a\xfa\x02\x61\x99\x68\x3a\x0b\xb9\x55\x1d\x02\xf4\x87\xb9\xd5\x78\xcc\x2a\x5a\x04\x46\x90\x6a\x45\xe0\x13\xf2\x71\x21\x37\x36\x2e\x88\x77\x7b\xb3\x37\xd1\x7f\x31\x6b\xba\x1d\xfd\x17\xbc\xa4\x66\xf3\x1b\xf4\x9f\x39\x62\x35\x9a\x30\x6a\x51\xd7\x25\x1e\x31\x39\x40\xb3\x79\x68\xf3\xfa\x04\x4b\xdc\x4a\x09\x96\x18\x44\x09\x96\xb8\x49\x09\x96\x98\x60\x89\x09\x96\x98\x60\x89\x09\x96\x98\x60\x89\x5b\x29\xc1\x12\xf7\xa7\xe4\xe2\x4e\xb0\xc4\x04\x4b\xdc\x93\x12\x2c\xf1\xf9\xdc\x12\x09\x96\x98\x60\x89\xdb\x29\xc1\x12\x03\x28\xc1\x12\xc3\x28\xc1\x12\xf7\xa6\x67\x18\x50\x4f\xb0\xc4\x14\x50\x7f\x88\xfe\x15\x02\xea\x9e\x12\x2c\xf1\x29\xf5\x5b\x82\x25\x26\x58\xe2\x3e\x94\x7c\x36\x09\x96\xf8\x9c\xac\xa8\x04\x4b\x4c\x56\xd4\x43\xf4\xb9\x5b\x51\x1e\x3c\x77\x29\xc5\x24\xb2\x04\xe8\x25\x02\x51\x58\xe6\xb0\x74\x62\x1a\x81\x25\xf4\xc3\x1a\x93\x37\x5d\x4c\x12\x76\xcf\x70\xa5\xc4\x7a\x73\x75\x68\xb9\x06\xeb\xa7\x06\xe8\xc5\xd0\x1b\x3a\xe3\x8b\x75\xaa\x93\x52\xd8\xff\xd7\x00\x67\x5a\x88\x19\xeb\x67\x0b\xab\x8b\xf8\xab\x57\xdb\x0b\x85\xc9\xdc\x0b\x91\x09\xed\x4b\xb5\x03\x1e\xd3\x82\xb9\x04\xb1\xed\x42\x63\x06\x82\xb8\x3c\x31\xbc\x29\x12\xd6\xb2\x03\xd2\x12\xf5\xc8\xd9\x01\x67\x71\xa0\x84\xa8\x30\x66\x03\x65\x69\xc3\x51\x82\x59\xd6\x30\x96\x6d\x50\x94\x60\xae\x52\x08\x3d\x3c\x0c\x65\x07\x04\x25\x66\xa0\xdb\xe0\x27\x1e\x42\x12\xcc\x74\x0d\x7a\xb2\x06\x1f\x09\x1f\xeb\x06\xec\xa4\x05\x1d\x09\xe6\xda\x82\x9c\xac\xc3\x46\x82\x79\x7a\xb8\xc9\x16\xc8\x48\x30\x4f\x51\xe9\xc7\x80\x8b\x3c\x16\x54\x64\x70\x98\x48\x7c\xf0\x2f\x32\xf0\x17\x65\xf9\xb9\xa4\x84\xeb\xb9\x04\x35\x17\x45\x90\xb2\xee\x28\xea\xf7\x8c\xb3\x45\xb5\x30\x7a\x45\x19\xdd\xca\x96\x61\x6a\xd5\x0d\x4c\xd5\xea\xcf\x1a\x56\x36\x66\x68\x98\xb3\x1c\x64\xa0\x43\xc4\xf0\x36\x22\x85\x65\x72\xe7\x14\x1d\x37\xaa\xca\x32\x80\x3c\xf4\xfe\x6e\xfb\xcc\x7e\x3f\xae\x57\xc1\x36\x27\x0b\xd4\x83\xaf\xc2\xef\xfc\xf0\x17\x85\x6d\x08\x8e\x1c\x7e\xff\xbb\xde\x9f\x8f\x40\x19\xdd\x8f\x30\x0a\xbe\xf5\x07\x44\x17\x45\x9a\x53\x31\x1e\xaa\x60\xef\x54\xac\x0d\x17\xe3\x95\xda\x89\x22\xf2\xae\xa4\x70\x95\xd7\x45\x10\xb5\xdd\x48\xc1\x3c\xdb\xe8\xa1\x2d\x08\xa0\x08\xa3\x63\x48\xf4\xcf\x20\x38\xc0\x48\xd4\xcf\x63\x20\x7e\xb6\xa3\x7d\x50\x0f\x84\x5f\xf5\xc3\x22\x7d\x9e\xfc\xa2\xdf\x81\xee\x69\xf0\x39\x31\xbe\xac\x2e\xb2\xa7\x85\xcd\x89\x41\x34\x0d\x83\xea\x79\x36\xfd\xd6\xa3\x5c\xb9\xb1\x28\x9e\x61\x10\x3c\x03\xb7\xf8\x1e\x0c\x82\x32\x10\x6a\x67\x20\x2f\xe8\x00\x68\x9d\xc7\x5c\xa6\x78\x94\x4e\xf4\x3a\xc5\xa0\x73\x1e\x03\x99\x33\x1c\x2a\x27\x7e\x69\x22\xe2\x48\x8f\x81\xc4\xd9\x1a\x3f\x72\x0b\x16\xe1\x48\x5b\x8b\x1d\x45\x47\x67\xba\x71\xa3\xb5\xd8\x4f\x30\x57\x1f\x95\x1a\x2a\xee\x13\x15\xf3\x19\x26\xde\x33\x48\xac\xe7\x51\xd0\x32\x
71\x48\x99\xdd\x28\x19\xf3\x28\x8d\x91\xd4\x2e\x42\x66\x0d\xe5\x12\x63\xb2\xc6\xa0\x63\xa2\x36\x91\x71\xa6\x19\x2d\xde\x42\x41\x57\x57\x90\x09\x9e\x07\x59\x05\x6b\xfd\x29\x5d\xcc\x7a\x4a\x94\x65\x19\x34\x33\xeb\x55\xe9\x66\xbb\xcf\xa9\x6b\x1e\x1f\x68\x4e\xba\x62\x09\x3e\x70\xe7\x0c\x4a\x42\x31\x34\x66\x56\x22\x28\xb4\x46\x9e\x59\x78\x8d\x3c\xb9\xfb\xc6\xa6\xfb\x0f\x25\x50\x7f\x15\xb7\x44\x4c\x35\x70\x72\xc8\xb8\x97\xa9\x30\x87\x78\xe3\xb8\x69\x7c\x81\xf5\x11\x0e\xe5\xf8\xea\xa5\x1f\xd4\xbf\xae\xc3\x0e\xdd\x9e\x4a\x3d\x3f\xff\xaf\x1b\xd8\x76\x07\x70\x60\x38\xd2\x3b\x8d\x3d\xf3\x69\x55\x74\x9c\xc0\x11\xce\xe4\x2e\x6a\xf2\x55\xdd\x16\x3a\x4c\x8e\x70\xce\xb5\xb6\xa3\x3c\x27\xae\xfc\x50\x2d\xa8\x41\x7c\xbd\x70\x7f\x8a\x82\x1a\x85\x03\x7d\x1c\x0c\xe8\x4e\xfc\xa7\xc5\x71\x06\xf1\xbc\x07\xfb\x69\x31\x9c\x41\x5c\xb7\xe2\x3e\xbb\xf8\xcd\x40\xef\x70\x18\xe6\x33\x79\xd2\xfb\xd1\x0e\x6c\x27\x3a\xd5\x83\xed\xd7\x2d\xb8\xce\xda\xa9\x1e\x63\x14\x0f\x80\xe9\x7c\x46\x6f\xf0\x61\x70\x9c\xe9\x0d\x9e\xde\xe0\x1b\x34\xf8\x1b\x5c\xb3\x05\x88\x4a\x3f\xcb\x07\xe1\xed\x9c\x65\xf3\xb6\x1d\xc7\x16\xa0\x88\xa8\x02\xef\xc9\x8e\xb5\xe5\x86\x37\x98\x71\x94\x5e\x85\x35\x85\xc4\x0b\xb6\x38\x31\xd7\xcb\xdf\xd5\x68\xc9\xde\x93\xa1\x8a\x50\xf2\xf6\xe2\xea\xe7\x77\xa7\xff\x79\xf6\x6e\x4c\xce\x68\x36\x6f\x37\x14\xee\x8f\x0e\xa3\x78\x5b\xa1\x92\x9a\xd3\x25\x10\x4a\x2a\xce\xfe\x51\x81\xbd\x20\x0f\xeb\xef\x3a\xea\x2f\xa9\xd1\xf8\xdf\x60\x2d\x63\x6e\x9a\xde\x1a\xa0\xb3\x71\xef\x98\xc2\x4e\xc7\xc8\xc9\x01\xc6\x44\x40\xc6\xda\x54\x8a\xc5\x7a\xba\xc1\x99\x61\x65\xcd\xe5\x20\x6b\x6c\x0e\x12\xc8\x8c\x2d\x1d\x52\xd6\x35\x39\xa7\x79\x5d\x09\xca\x9c\x5d\x23\xf2\x21\x25\x20\xe9\x04\x21\x6d\x73\x20\x1c\xb4\x39\xda\xb5\x0b\x51\x70\x15\x55\xc4\xb6\x52\xa0\x8e\xc9\xa4\x42\x30\x63\x29\xd9\x82\x4a\x56\xac\xda\x83\xa5\x45\x7f\x21\xbb\x10\xfe\x21\xb3\x6a\x96\xd4\x2e\xd1\xdb\x0f\x67\x57\xe4\xe2\xc3\x75\x7f\x47\x90\xb4\x65\x70\x11\xd5\x87\xfc\x70\x1b\x27\x60\xbe\xc1\x0a\x42\x80\xc3\xed\x94\xaf\x2c\x33\x7b\x19\x30\x45\xcc\x4b\x04\xb8\x61\x1a\x68\x0b\x39\x83\x95\xbc\x78\x39\xc6\xff\x7b\x61\xa4\x40\x1a\xe3\xca\x83\x3d\x7b\xb3\xcc\x36\x92\x02\xac\xb9\xc6\x26\x05\xe0\x32\xf4\xbf\x97\x1b\x59\x1a\x20\x27\x20\x1c\x87\x10\x88\x3f\x58\x03\x62\xbb\xd5\xb9\x34\x1b\x29\xa1\x94\xa0\x80\x07\xbd\x18\x68\x7d\xc0\x50\x28\x18\x27\x94\x18\xad\x50\xb4\x75\x45\xff\xc5\x8e\x79\x5b\x86\xbf\x2c\x47\xcd\x98\x2f\x43\x54\x5a\xdc\xf3\xb2\xf3\xdd\x61\xa6\xf1\xd6\x67\xcf\xd4\x3f\x59\x02\x4d\x5c\xab\x2a\x7c\x74\xb6\x14\xf9\x81\x22\xe7\x97\xfe\x8c\x86\x3e\x00\xae\xe7\x4c\x35\xaf\x0a\x63\xb9\xb1\xdc\x0e\xd4\x86\x4a\x43\x01\xd2\x2f\xc9\x7f\x90\x3b\xf2\x1f\xf8\xe8\xf9\x53\xd8\xe0\xe2\x1f\x17\x71\x6e\x32\xeb\x67\x38\xbf\x1c\x40\x06\x7e\x30\xba\xdf\x70\x33\x3b\xa6\x05\x99\xb0\xe0\xaa\x92\x66\xf3\xe1\x4e\x83\x34\x77\xb3\x93\xa8\x98\xf5\x0d\x7e\x76\x99\xe9\x3c\xeb\x23\x62\x63\x93\xe7\xd3\xc6\x42\x0e\x15\x66\xfd\xab\x1c\x12\x33\xd4\xbf\x0a\xa5\x2f\xac\x26\x0f\xe4\xc9\x54\x6b\xbe\xad\x91\x2f\xa8\xce\xe6\x81\x3c\x3b\x17\x95\x79\xd2\x29\xdd\xa8\xc9\x50\x07\x57\x2e\xd0\x2f\x6b\x73\x28\xe6\x2c\x50\x83\x3d\xbd\x92\x08\x87\x81\x75\xce\x40\x47\x4e\x71\x41\x02\xd7\x75\x97\x8b\x88\x84\x57\xb2\x75\x8f\xb8\x56\x79\xf8\x52\xe4\xf6\xbd\x18\xc8\xd1\x2c\x5a\xde\xb2\x55\x3a\xcf\xc6\x50\x5f\x56\xfb\xb1\xe9\xf2\x9c\x5d\xc0\x2b\x90\xa3\xd5\x46\x46\x77\x67\x94\xdb\x94\xd6\x29\x48\x89\x49\x3f\x81\x2c\x27\x2b\xc4\x54\xb1\x0c\xa2\x04\x3e\x58\x6b\x97\x52\x68\x91\x89\xc0\x62\x06\x5d\xb8\x99\x63\x85\x8b\x1c\x1e\xa6\x21\x3e\xbe\x47\xbe\x7b\x7b\x79\x4c\xae\xdf\x5c\x1e\x13\x21\xc9\xd5\x9b\xeb\xcb\x68\x64\x89\x16\xe4\xc5\
xf5\x9b\xcb\x17\xbf\xfa\x52\x77\xfc\x91\xe6\x65\x36\x5a\xd0\x72\x74\x03\xab\xde\x26\x69\x9c\x31\x3c\xaa\x37\x3c\x7a\x02\x76\x35\x16\xb4\x5f\x73\x0d\x09\x34\x67\xcf\x30\x71\xdd\x1d\xc2\x66\x7c\xeb\x19\xec\xbd\x39\xa2\x7a\x58\x88\x25\xe4\xf6\x81\xef\xbf\x01\x78\x5e\x0a\x16\xf2\xac\x4b\x69\xf0\xf7\x51\x4a\x83\xbf\x9f\x52\x1a\xfc\x06\xa5\x34\xf8\x94\x06\x9f\xd2\xe0\x53\x1a\x7c\x4a\x83\xdf\x67\x00\x29\x0d\xbe\x1f\xef\x94\x06\xbf\x9d\x52\x1a\xfc\x4e\x4a\xe0\xbd\x7e\x94\xd2\xe0\x49\x4a\x83\xdf\x93\x52\x1a\xfc\xc3\x94\xd2\xe0\x53\x1a\x7c\x4a\x83\xbf\x97\x52\x1a\xfc\x7e\x94\xd2\xe0\x77\xd2\x33\x82\xe0\xa7\x34\xf8\x04\xc1\xdf\xcd\xe5\x79\x41\xf0\x49\x4a\x83\xef\xf9\xf1\x94\x06\xbf\x37\xa5\x34\xf8\x7d\x29\xa5\xc1\xef\xcf\x31\xa5\xc1\xa7\x34\xf8\x94\x06\xbf\x37\xa5\x34\xf8\x35\x4a\x69\xf0\x29\x0d\x7e\x7f\x4a\x69\xf0\x3d\xb8\x3c\x9f\x37\x78\x4a\x83\x4f\x6f\xf0\xdd\x5c\x9e\xd7\x1b\x3c\xa5\xc1\xa7\x34\xf8\xad\x14\x6e\x7a\x49\x50\xa2\x92\x59\xff\x4b\xaf\x2b\x49\x6f\xc4\xa2\xac\x34\x90\x8f\x9e\x5d\x7d\x87\xf7\x9e\xcd\x64\x65\xf3\x64\x5a\xda\xef\xd7\x87\xd9\x66\x82\x4f\xd9\xac\x92\x98\xbb\x7c\xb2\xa0\x9c\xce\x60\x94\xd9\x49\x8e\xea\x35\x1b\xd5\x63\x3c\xf9\x44\xa0\xb6\x05\x5b\xb0\xfe\xe9\xf3\x64\x63\xbf\xdf\x21\x9f\x56\x03\xfd\xd0\x7b\x6e\x41\xef\xf0\x39\x49\x17\xa2\xe2\xda\xa2\xc8\xad\x28\xd5\xab\x1c\xa6\x94\x8a\x42\xdc\x9a\x67\xdb\x73\xdb\x78\x12\x6f\x1e\x37\xe5\x00\x2e\x23\x6d\xd6\x92\x6a\x0d\x92\xbf\x26\xff\x75\xf8\xf7\x2f\x7f\x19\x1d\x7d\x75\x78\xf8\xe3\xcb\xd1\x5f\x7e\xfa\xf2\xf0\xef\x63\xfc\x8f\x7f\x3f\xfa\xea\xe8\x17\xff\xc3\x97\x47\x47\x87\x87\x3f\x7e\xfb\xfe\x9b\xeb\xcb\xb3\x9f\xd8\xd1\x2f\x3f\xf2\x6a\x71\x63\x7f\xfa\xe5\xf0\x47\x38\xfb\x69\x4f\x26\x47\x47\x5f\x7d\x11\xf8\x20\x0b\x36\x0c\x86\x30\x0b\x06\x30\x0a\x06\x37\x09\x1c\x92\x62\x80\x23\xfd\xd1\x71\x8a\x84\x1b\x18\x0b\x60\xe1\x7c\x44\x1b\x87\x3a\x88\xa3\xdc\xb8\x53\x30\x39\xd3\x8f\x37\xec\xc1\xac\x88\x58\x30\x6d\x5e\xdd\x53\x21\xdb\xb5\x2f\x8e\x09\x0b\x7b\x72\xb5\xf1\x4a\x4e\x3d\x62\xca\x08\xd5\xa1\xee\xc6\x16\x82\xb7\x95\x8c\x28\xf4\x1c\xe4\x2d\x0b\x6c\xb1\x6d\x1e\x27\xbc\xf1\x17\xa0\x9a\x1b\xe5\x30\x65\x1c\x5c\xdc\xf9\x57\xf1\xb3\x27\x35\x9b\xd4\xec\x43\x3c\x9e\x8b\x9a\x55\x90\x55\x92\xe9\xd5\x1b\xc1\x35\xdc\xf5\x76\x3b\x74\xb5\xec\x95\x63\x46\x44\x69\x33\x09\x9c\x37\xa5\xbf\x63\xdf\x66\x26\xc9\x8a\x63\x26\x70\x6f\x43\xa7\x14\x05\xcb\x56\x27\x7e\x72\x78\xd4\xe0\x4e\x9f\x3c\x92\x65\xad\xa9\xba\x69\x8e\x3d\x8c\xcc\x1b\xaa\x39\xdd\x1b\xa3\xf8\x44\xcc\x69\xb4\x32\x2f\x25\x5b\xb2\x02\x66\x70\xa6\x32\x5a\xa0\x46\x8b\xbf\x8d\x4f\x77\x70\x0e\x8d\x8c\x68\x29\x0a\x45\x6e\xe7\x60\x6e\x0f\x42\xcd\xac\xd1\x45\x95\xd1\x30\x96\x33\xca\x38\x59\x98\xad\x2f\xfd\x20\x8d\x2c\x9b\xbb\x25\xf0\x4a\x2e\xa9\x04\xae\xfd\xc0\xc6\xb6\x16\xca\x44\x88\xc2\xa5\x4a\x15\x61\xa8\xcc\x7a\xee\x2e\x7f\x93\x8b\x9f\x39\xdc\xfe\x6c\x46\xad\xc8\xb4\xa0\x61\x0a\xc6\x17\x50\x52\xa0\x37\x9a\x8e\xd7\x53\x08\xe2\xbc\x6b\xe3\x6d\xae\x4c\x20\x70\x83\x16\xb7\x74\x85\xdb\xbf\x3e\x56\x16\x78\xa5\xbd\x3a\x42\xd5\x43\x15\xa9\xc7\x9a\x93\xdf\x1d\x21\x12\xe0\xcd\xe9\xe5\xcf\x57\x7f\xbb\xfa\xf9\xf4\xed\xfb\xf3\x8b\xf0\x8b\xdb\xec\x3d\x04\x88\x67\x46\x4b\x3a\x61\x05\x0b\xbd\xaf\x37\x90\x7d\x6d\x86\xa1\xb5\x06\x68\x9e\x9f\xe4\x52\x94\x76\x0f\x64\xc5\xb1\x36\x58\x53\xb6\x23\xde\x4b\x66\xf6\xd5\x17\x0d\x43\xb1\x0c\xcb\x7f\xed\x4c\x76\x26\x29\x37\x36\x32\xfa\x64\x20\x22\xc1\xdb\x90\xac\xb8\x66\x8b\x4f\x31\xd1\x95\xe6\xc3\x24\xb9\x9e\xe6\x39\xe4\x9d\x15\x8e\x8a\x9f\x3c\x17\xa8\xfa\x1b\x3f\xa1\x55\x53\x36\x2d\x98\x33\x21\x97\x1f\xae\xce\xff\xf7\xda\xa1\x5b\x95\x11\xc9\xbb\xb1\xd6\xa4\x39\xb7\x83\xec\xff
\x47\x57\x91\x20\x49\xc0\xfd\xf4\xac\x24\xa0\xb6\x6f\xe2\xb1\x33\x1f\x2b\xde\xa9\xe8\xda\xe2\x1d\xe6\x3c\x15\x39\x8c\xc9\xa5\x35\x37\x40\x0d\xc0\xb1\xb9\x90\x10\x7c\x67\xd8\x72\xcd\x68\x51\xac\x08\xfc\xa3\x62\x4b\x5a\x84\xee\xac\x16\x36\x39\xbd\x53\x0e\x2c\x0a\xa3\xa9\x05\x99\xd2\x42\x45\x5c\x28\xa1\x16\x86\x31\xf0\xde\x8b\x8a\xc7\x63\x55\x6a\x4e\x24\x07\x2e\x74\x84\x23\xdd\xcc\x08\x6b\xb6\x49\x91\x11\xeb\x77\x8b\x04\xd7\x76\x6e\x7c\x65\x33\xff\xbd\x81\x11\xe8\xd1\x72\xfb\x7d\x59\xcf\xda\x46\x0d\x2b\x15\x31\xf1\xe6\x40\x39\x03\xa3\xf1\xc4\x85\xce\x5c\x02\xcd\xb1\x8e\x48\x49\xf5\xdc\xe2\xb3\x16\x54\xdd\x40\x6e\x7f\x11\x88\xfc\x31\xef\x19\xe7\xc1\xb4\xcf\xfe\x7a\x19\xae\x43\x95\xdb\x14\xa8\xae\x24\xe0\x3b\xc6\xa5\x74\x01\xa7\x93\x22\x0c\x2d\x1b\xa5\x26\xcd\x9a\x7d\xe0\xc5\xea\xa3\x10\xfa\xeb\xba\x5e\x44\xf4\x01\xf9\xc1\xbd\x58\xbb\x21\xc1\x30\x80\x0e\x56\xd8\x36\xe3\x1c\xe1\xe6\xa2\x3a\x6a\x95\xb6\x88\x91\x68\xf3\x3a\x7b\x22\x65\x24\x2b\x7e\xaa\xbe\x91\xa2\x0a\xb2\x51\x36\x1e\x3b\xdf\x9c\xbf\x45\x5d\x5d\x85\x63\x57\x80\x6b\xb9\xc2\x2a\x4b\x9b\xf5\xd1\xa3\xde\xc6\xdf\x19\x3d\xb1\x76\xca\xcd\xbb\xbe\xe2\x0a\x02\x71\x06\xef\xe9\x8a\xd0\x42\x09\xff\x98\x67\x9c\x5c\x22\x42\xba\xed\xf2\x1b\x13\x72\x1e\xf6\x9a\x72\x2c\x27\x42\xcf\xc9\x1a\xd3\xe0\x0a\x80\x9b\xe3\xb3\x15\x69\xc2\xb3\xc6\xea\xe8\x82\x19\xeb\xfa\x30\x35\xbd\x09\xb4\x52\x4b\x09\x19\xe4\xc0\xb3\x88\x53\x31\x08\x26\xe2\x4f\x7f\x08\x3b\x55\x17\x82\x1b\x75\x16\x7d\xae\xce\x79\xce\x32\x6a\x6f\x77\xaa\x07\xb8\xe9\x10\x71\xe5\xfc\x2f\x14\xcb\x9d\x18\x65\x16\xc4\xb4\x52\x20\x31\x9e\xa6\x65\x05\x56\x90\xbe\xad\x26\x50\x80\x0e\x2b\xbc\x46\x2c\xd4\x86\xe5\x54\x03\x72\x63\x0b\x3a\x03\x42\xb5\x3f\xb8\xa1\xb6\x1e\x70\x65\xae\x3a\x1b\x50\xd3\x24\x17\x10\x57\x30\x88\x2a\xf2\xdd\xf9\x5b\xf2\x92\x1c\x9a\xb5\x3b\xc2\x5b\x7e\x4a\x59\x81\xa9\xea\x9a\x06\x61\x48\xc9\xba\x77\x6d\xea\x87\x8a\x4b\x8c\x7a\x2a\x88\xad\x90\xf6\x7a\x39\x26\x5c\x10\x55\x65\x73\xbf\xc6\x4c\xf0\xf0\x7d\x9a\x80\xcf\xba\x40\xa4\x44\x57\x15\x86\xc5\x55\x77\xaa\xcf\x18\x96\xbb\xd4\xe7\x36\x55\x18\xbc\x6b\x16\x1f\x76\x9f\x2a\x0c\x63\x6d\xd4\xe7\x20\xaa\x30\xca\x40\xf8\x4e\x81\x1c\xc4\x3e\xf8\xee\x19\xdb\x07\x6d\x77\xa8\xd1\x6b\x9d\xdd\x0c\x13\x3e\x54\x5e\x0b\xd0\x34\xa7\x9a\x3a\x5b\x23\xae\x12\x60\xb2\x38\x92\xc5\xb1\x8d\x14\xbc\x63\xbc\xba\xb3\xe0\xff\x61\x02\x17\x57\x67\xc8\x92\x64\x31\xfa\x0b\x05\x95\x96\x65\xc1\x6c\x5d\xb9\x6e\xc7\x9b\x20\x9e\xe7\x9d\x63\x74\x3c\x5c\x6c\xc1\xc6\xe5\x68\x51\x08\x63\x6e\x99\xd7\x1e\xe5\x79\x40\x5f\x13\x43\x6b\x8b\x87\x38\x21\xe8\xf4\x64\x1a\xe3\x61\x0e\x0c\xc8\x25\x05\xf0\x5c\x15\xc0\x93\x85\x99\x0a\x58\x42\x60\x29\xf0\xf5\x16\x57\x86\x13\x61\xca\x0b\x71\xb0\xe7\x1c\x87\x44\x0a\x3a\x81\xc2\x5a\xde\x56\x11\x04\x87\x41\xc9\xba\x89\xfc\x64\x55\x3c\xa4\x28\x86\xa9\x64\xf0\x51\x14\x98\x56\x40\xa3\x17\xdb\x0c\xe9\xb3\x5c\x6b\x64\x30\xc4\x5a\x5f\xaf\xca\x81\xd6\x1a\xdd\xe5\x9f\xe3\x5a\x57\x81\x86\x3e\x59\x5f\x6b\xf3\x62\x18\x66\xad\xd1\x14\xff\xdc\xd6\xfa\x96\xf1\x5c\xdc\xaa\x21\xcd\xb5\x1f\x2c\x4b\x7f\x37\x66\xa1\xd7\xbf\x66\x7c\xa6\xda\x26\x1b\x2d\x8a\x68\xa8\xc9\x36\x9b\xcd\x63\x18\x83\x7a\xc7\x11\xb7\x8d\x9b\xb6\x85\x47\x57\x85\xca\xbf\xc5\x87\xdf\x63\x07\x85\x99\x6c\xdb\xfd\x0d\xc9\x0e\xda\xf2\xe5\x71\x76\xd0\x6c\xa1\xe8\x1b\x69\x86\xaf\x19\x2d\xae\xca\xb0\xee\x00\x64\xfd\x88\x7d\xf3\xfe\xea\xb4\xcb\x36\x58\xfd\x30\xc4\xd2\x49\xeb\xe0\x34\x7c\x09\xcd\x17\x4c\xa9\x50\x80\xa6\xa1\x5b\x98\xcc\x85\xb8\x21\x87\x1e\x45\x3b\x63\x7a\x5e\x4d\xc6\x99\x58\xb4\x00\xb5\x23\xc5\x66\xea\xc4\x29\x9f\x91\x59\xa9\xf0\x0a\xe4\x8c\x17\x8c\xbb\x58\x24\xbe\x72\xb8\x56\xce\x23\x1
3\xcc\x13\x17\x23\xab\x57\x19\xe5\xdb\xf6\x17\x0a\x66\xe9\xc0\x67\x9b\xdb\x17\x95\xb4\x8c\x95\xf6\x1c\xca\xd4\x56\xdd\x33\x77\x5d\x51\xce\xe9\x08\x8d\xde\x60\xc6\xae\x8b\x01\x86\x18\xe7\x82\x0b\x69\xf1\x73\xb6\x68\x5d\x44\xe2\xbd\xb9\x39\x6d\x70\x17\x97\xc3\x5d\x16\x66\x55\xc2\xd7\xa0\x15\x3e\x7e\x32\x83\x65\xf3\xb4\x5f\x44\xd4\xb0\x7c\xe0\xc4\x47\x89\x8c\xed\x4d\x61\xeb\x2a\x38\xc7\xe5\x9a\xb8\x07\xb3\xc6\x63\x62\xbd\x96\x6b\x32\x19\x2e\x87\x8d\x2c\x6f\x93\xc9\x98\xe3\xe8\x64\x79\x5d\x26\x83\x59\xb6\x64\xf9\x79\xc8\x64\xed\x32\x1f\x4c\x14\xd1\x75\xee\x18\x86\x3a\xa3\x49\xb3\x54\x2d\x17\x7c\xcb\x9d\x1e\xcc\x75\x58\x37\x3c\xd9\x82\x4c\xee\xba\xe3\x23\xae\xac\xc7\x70\xc9\x93\xbd\xdd\xf2\xc1\xec\x1f\xc1\x20\x25\xf7\x1b\xa5\xc1\x3c\x1f\x29\x10\x46\xb6\x05\xc3\x5a\xba\x2e\x46\x2d\x4f\x40\xb7\x55\xdd\x82\xae\xcc\x36\xe6\x4c\x21\x16\x2a\xdc\x24\x63\x7a\xde\xbe\x6c\x3f\xb6\xd5\xc2\xa7\x79\xeb\xba\x3a\x63\x03\xf4\x7f\xbb\x6a\x71\x22\xcc\x23\x1c\x7a\xcf\xc7\x23\x22\x8c\x18\xdb\x3a\x92\x75\xed\x36\xec\x20\x8f\x75\x30\xd9\x3f\x43\x8e\x76\xb7\x89\x27\x17\x36\x75\xb6\x55\x5c\xb2\x37\x47\xd7\x4e\x2a\x27\x15\xd7\xac\xf0\xa0\xb4\x45\x59\x18\xd3\xbc\x33\xf2\xa0\xd1\x22\xbf\x56\x83\xb9\xe3\x7a\x61\xc2\x7b\xe0\xb9\xb2\x9c\xc7\xe4\xff\x56\x4a\x13\x5a\xe7\x41\xf9\xea\x73\xb8\x7f\xbd\x59\xfb\xa2\x78\x78\x7c\x5d\x3b\x4e\xf3\xf2\xc6\x42\x8e\x52\x2c\x43\x9a\x6d\xe5\x6c\x3a\x05\x9f\x03\x36\x01\x52\x52\x49\x17\xa0\x11\x8b\x1c\x06\x9a\x99\xc0\x8c\xd9\x44\x1b\x31\x25\xd4\x2c\xe4\xc1\x81\x6a\x4a\xa2\x1d\x63\x32\x4e\x6f\xae\x4c\x93\x05\x9b\xcd\xed\xf3\x9c\x50\x52\x08\x3e\xc3\x72\x37\x66\xf2\x85\xa0\xfd\xf5\x0d\x5e\x64\x42\x92\x5b\x2a\x17\x84\x92\x8c\x66\x73\x84\xe2\x50\x4e\xf2\x2a\xc8\x88\xc1\x1e\x14\xab\x91\xd2\x54\x03\x31\xaf\x71\x44\x8a\x34\xfb\xc5\x03\xd0\x42\xad\xf2\x2b\x96\x8f\x79\x30\x71\x6b\x67\x7a\xa5\xd6\xff\x44\x59\xb8\xaa\x7f\xe4\x75\x94\x4a\x28\x4f\x54\xac\xcf\xaa\xc6\x4f\x6a\xac\x98\x1a\x2b\xf6\xa0\xd4\x58\x31\x35\x56\x0c\x65\x99\x1a\x2b\xa6\xc6\x8a\xa9\xb1\x62\x8f\x01\xa4\xc6\x8a\xfd\x78\xa7\xc6\x8a\xdb\x29\x35\x56\xdc\x49\xa9\x1c\x74\x3f\x4a\x8d\x15\x49\x6a\xac\xb8\x27\xa5\xc6\x8a\x0f\x53\x6a\xac\x98\x1a\x2b\xa6\xc6\x8a\xf7\x52\x6a\xac\xb8\x1f\xa5\xc6\x8a\x3b\xe9\x19\x35\x75\x48\x8d\x15\x53\x53\x87\xdd\x5c\x9e\x57\x53\x07\x92\x1a\x2b\xf6\xfc\x78\x6a\xac\xb8\x37\xa5\xc6\x8a\xfb\x52\x6a\xac\xb8\x3f\xc7\xd4\x58\x31\x35\x56\x4c\x8d\x15\xf7\xa6\xd4\x58\x71\x8d\x52\x63\xc5\xd4\x58\x71\x7f\x4a\x8d\x15\x7b\x70\x79\x3e\x6f\xf0\xd4\x58\x31\xbd\xc1\x77\x73\x79\x5e\x6f\xf0\xd4\x58\x31\x35\x56\xdc\x4a\xe1\xa6\x97\xd2\x39\xeb\xdd\x07\xe4\x31\x4a\x9f\x3a\xa4\x65\xab\x28\xce\xa4\x9a\x4e\x41\xa2\xa9\x8c\xa3\x0c\x70\xa2\xac\xb9\x29\x7c\x69\x7f\x0f\x62\xef\xcf\xd1\x9a\x6d\x0a\xf4\x31\x56\x68\xb5\x39\xe5\x76\x78\xee\xeb\x7a\xf3\x6c\x86\xe7\xaa\x02\x61\x0f\x0c\x09\x0a\xab\x86\x72\x72\xf6\xe1\xeb\xfe\x47\x28\xb6\xde\x6b\x78\x29\x37\x5c\x8b\x0f\x3c\x8b\x4b\xdb\x68\x84\x6a\xcb\xfe\x85\xca\x56\x56\x08\x65\xb3\x52\xed\x86\x65\x73\xca\x39\xb8\x27\x62\x7f\x59\xd0\xe8\xf6\x9a\x00\x70\x22\x4a\xe0\x16\x1d\x4e\x89\x62\x7c\x56\x00\xa1\x5a\xd3\x6c\xde\x7f\xe7\x7e\x98\x03\xf7\x02\x65\x5b\xa9\xb4\x46\xac\xb4\x04\xda\x5f\xfd\xa1\x60\x49\x58\x50\x66\x87\x4a\x68\x26\x85\x52\x64\x51\x15\x9a\x95\xf5\x60\xfb\xaf\x2b\x60\x2e\xb9\xad\x1a\x59\x6f\xbc\x19\xb7\x02\x0b\x1a\x94\x15\xf4\x47\x33\xd6\xb3\x77\xcb\x2a\xda\xe5\xe8\xf1\xa1\xdc\x9f\x27\x53\x04\x16\xa5\x5e\xd5\x29\x31\x40\xa6\x4c\x2a\x4d\xb2\x82\xe1\xbb\x09\x57\x20\x24\x31\x48\xd8\xf1\x1e\xe3\x33\x5e\x63\xdb\x14\x5c\x69\xe5\x96\x3a\xa0\x96\x95\x31\x1f\x4b\xad\x6c\xc2\x45\x33\x60\x37\xd4\x9c\x29\x
67\xd2\xab\xfe\xeb\x40\x7d\x31\x73\xcc\x04\xa9\x57\x1a\x8f\x46\x1e\x34\x58\x3f\x5b\xc7\xa2\x35\xdc\xe0\xbb\x00\x2b\x90\x3b\x7f\x72\xad\xac\x6d\xf5\x70\xaf\xd0\x02\x66\xbe\x99\x37\xea\x2b\xdd\x36\xaa\x3c\x50\x56\xf1\x80\x71\x58\x1a\x1d\x05\x19\xb0\x25\x1a\xc3\x46\x73\x07\xfb\x30\xfd\x44\x7f\x35\xc5\xad\x41\x2e\x18\xc7\x04\x9c\xf7\xa0\x14\x9d\xc1\x65\x40\x0c\x78\xd7\x4b\x16\xc3\xc0\x5e\xf8\xfa\x1f\x32\x3c\xae\x05\x3e\x67\x1b\x43\xb3\x81\xb6\x07\x60\xdb\x5b\xd3\x25\x0b\x3b\xdf\xba\x5d\xd6\xad\x64\x5a\x87\xa4\x5c\x29\xdb\xd2\x00\xb3\x51\xd7\x6b\x10\x1e\x1c\xa8\xa8\x32\xee\xef\xfd\x20\xed\xe0\xcc\x97\x19\x83\x91\xe7\x16\xee\x1d\xf0\x2a\x9c\x48\x06\x53\x32\x65\x9c\x16\x0e\xcd\x7d\x6c\x8b\xf6\x52\x9b\x31\xa5\x14\xc8\xa0\x66\x6e\x0e\xdb\xeb\xd7\x75\x4c\x7e\x70\x0b\xab\x65\xc5\x8d\x75\x17\xd8\x6e\x1b\x08\x17\x39\x10\x36\x25\x33\x44\x8d\x4b\x9b\x7c\xf6\x87\x97\x7f\xf9\x13\x99\xac\x8c\xe9\xde\x9b\xef\xf5\xdc\x08\x95\xa6\x45\x2d\x04\x05\xf0\x99\x91\x55\x7b\x45\x86\x20\xe7\x5b\xdd\x49\xbc\x44\x61\x43\x6b\xbb\x51\xaf\x7e\x77\x33\x09\x36\xe8\xf0\x3d\x74\x92\xc3\xf2\xa4\x25\xbf\xa3\x42\x04\x64\xe8\x6e\x76\x2b\x0f\x4b\x17\x0b\x78\x22\x6f\x51\x35\xd8\xe6\x31\x4a\xd9\xf8\xea\xe5\x64\x2e\x6e\x51\x56\x5a\xdf\xd2\x7b\x71\xbc\x30\x34\xf9\x5f\xa5\x28\xab\xc2\x66\x15\x7e\xcd\x02\xdc\xa9\x28\x09\x95\xb3\x3f\x3b\x65\x6c\xb6\xea\xdd\x10\x3b\xc4\x0f\x71\xcd\x6a\xb6\x47\xdb\x4f\xa9\x37\x63\xe1\x6a\x01\xb8\x98\x4a\x5d\x80\xbc\x92\x30\x26\x5f\xd3\xa2\x98\xd0\xec\xe6\x5a\xbc\x13\x33\xf5\x81\x9f\x49\x19\x10\xca\xef\xac\x4d\x41\x8d\x55\x36\xaf\xf8\x8d\xed\xc8\x1c\x6a\x44\x14\x62\x46\x44\xa5\xcb\x4a\xfb\x5c\xe2\x2d\x2a\xbf\xbf\x6a\x63\xb6\x8c\xa2\x35\x26\x9d\xc9\xd7\x5a\x6c\xb8\x63\x21\xea\x0d\x53\xf8\x29\x27\x60\xd6\xcf\x26\x59\xb5\xc7\xaf\xbc\x02\x09\x11\x8b\xdf\xbd\xfc\xc3\x9f\xad\x7a\x24\x42\x92\x3f\xbf\xc4\xe4\x38\x75\x6c\x2f\x52\x63\xb3\x84\xdc\x73\x6a\x41\x8b\x02\x64\x37\x2c\x66\x0e\xc6\xd8\x29\x96\xfe\x1a\xbe\x51\x44\xbf\x9a\x1e\xd2\x71\x2a\xe7\xd1\x5c\x1f\xd7\xd7\x7f\x43\xbf\x07\xd3\x0a\x8a\x69\x80\xad\x5b\x28\xd1\x34\xf1\x39\x40\x23\xf5\xc0\x61\x7e\xcd\x9b\xec\x53\x72\x28\x2c\x45\x51\x2d\xe0\x2d\x2c\x59\xd6\x3f\x34\xd3\xd9\xae\x0e\x27\x5f\x26\xa9\x60\x01\x21\x16\x31\x25\x93\x42\x64\x37\x24\x77\xcc\x1a\x38\xb5\xb3\x54\xc2\x8d\x86\x56\xd6\x3a\x16\x0c\xf1\xf9\xe5\x61\x2b\x1e\x02\x67\x0f\x84\xb1\xef\x5c\xeb\x28\x00\x3b\x25\x0b\x5a\x96\x75\x71\x04\x49\x6f\x3b\x4b\x1f\xc0\xd1\xe8\x5a\xc6\xdb\xef\xc1\xfe\xc7\x21\x2a\xc0\x19\x1e\xde\x1c\xb9\x59\x07\x59\x08\x81\xf8\xf6\xb8\xb8\x68\x33\xde\xb0\x80\x4e\x47\xa8\x1a\x66\x71\x79\xe2\x25\x72\xb0\xc9\xc2\xeb\x35\xa5\x42\x23\x9a\xbe\x3e\x8c\x1d\x63\x6d\xf8\x1b\xe1\x0d\xce\xb1\xd0\x22\x2c\x78\x12\x19\xbb\x0a\xcf\x21\xe8\xec\x16\xc6\x9f\x31\x36\xb8\xa0\xda\x3e\xd9\x03\xd7\xc1\x97\xb2\xa3\xa4\x04\xa9\x98\x32\xb6\xf3\xf7\xa8\x60\xde\x14\x94\x85\x85\x83\xea\xf0\x40\x48\x57\x7b\x12\xb3\xc8\x56\x35\x62\x47\xbd\xb8\xfb\xec\x52\xe4\x8e\x19\x5e\x40\xb6\x9b\x22\xe3\x01\xf5\x8a\xd7\x0a\x05\x74\x12\xfe\x9f\xc8\x9c\x7b\xaa\x8b\xeb\xfb\x66\x77\x62\xef\x2d\xc3\xa3\xbe\xb8\x2c\xdf\xfa\xf2\x09\xe0\xf8\x29\x5e\x57\xb8\x06\x9f\xce\x6d\x55\x0f\x77\x00\xf5\x87\xd7\x94\xdb\xee\x21\x6e\x98\xc6\x73\x3f\x07\x77\xec\x1b\xf7\x44\x20\x4f\xe7\xad\x1c\x13\x0b\x67\xe4\x42\xfb\x61\x92\x83\xd7\x07\x4f\x72\xf9\xd8\x2d\x90\xa2\xa4\x33\x7c\xa9\x0f\xb0\x13\xeb\x2c\x83\x71\x43\xd6\x81\x00\x0a\x5d\x4c\xc8\xd5\x62\xd4\x4b\xc7\x3b\xf8\xa2\xc7\x28\xb2\x4f\x5d\x70\x98\x2d\xe7\x50\x08\xed\x85\x62\x95\xba\x2d\x25\x77\x4b\x57\x84\x4a\x51\xf1\x7c\x6c\xe3\x8b\x21\x0a\xdd\x12\x86\xbe\xdf\xaf\x2d\xe8\x85\xe0\xc1\x98\x6a\x5f\x0a\xbd\
x5b\xe1\x18\x1f\x3e\xc1\xf5\x47\x5f\x8d\x5f\xbd\xfc\x94\x2d\x27\x5c\x8b\x41\x2c\xa7\x8b\xda\x72\xb2\xf7\xcf\x93\xac\x8a\xef\xb9\x3b\xc0\xca\xbc\x77\xe1\x95\xba\x3d\x6e\xa8\xed\xe7\x1b\x49\x22\xa3\x5b\xc9\xb4\x3b\x2b\xb7\xac\x77\xc8\xcd\xd3\x21\x3a\x46\x88\x90\xed\x4a\xaf\x47\xd1\x89\x48\x31\xfd\xc3\x63\x7b\xf2\x11\xa2\xaa\xc9\x23\xdd\x87\xf6\x0a\x8b\x51\x98\xdb\x22\x81\x2a\x8e\x6f\xe3\xef\xaf\xaf\xc6\xf8\xed\x7b\xf1\x82\x1c\xda\x51\x1d\x28\xac\xd0\x75\xf4\x24\xc7\xd0\x6d\xe5\xd9\x5d\x19\xd8\x79\xa5\xb3\x9d\x67\x77\x25\xc5\xd8\x63\xd9\xec\x6b\xc4\x45\xe5\x0c\x9a\xdd\xfb\x1a\xc8\x7b\xdd\x50\x6a\xf6\xf5\x3f\x61\x4e\x97\xc1\x55\x0b\x14\x5b\xb0\x82\xca\x62\x65\x36\xf8\xca\xae\x2c\x99\x54\x9a\x00\x5f\x32\x29\xf8\x02\x82\xeb\x5c\x2f\xa9\x64\x58\xab\x5c\x02\x16\x1a\xcd\x40\x91\x2f\x0e\xbf\x3f\xfd\x88\xa0\xda\xd0\x82\x63\xc6\x4e\x01\xbf\x67\x95\xc2\x74\xc9\x41\x56\xb8\x35\xe1\x6e\x34\xe0\x45\x68\x4d\x95\xf5\xc3\xe2\xd7\xd7\x48\x6e\xe8\xec\x79\x5e\xef\x92\x59\x89\x45\xa5\x2b\x5a\x60\x01\xba\xac\xa8\x14\x5b\xfe\xda\x37\xa3\x2b\x1f\xf8\x96\xf5\x3e\x8b\x6b\x65\x13\x1b\xc5\xe7\x58\x36\x95\x05\x83\x8a\xfb\x22\xbe\x71\x47\xcf\x43\x0f\xc0\x0b\x90\x15\xdf\x06\xdd\xfb\xc5\x8c\xd1\xe7\x42\x50\xb6\x4c\x6d\x88\x83\x21\x13\x7c\xca\x66\x95\xb4\xd5\xbf\xd7\x7a\x1a\x2f\xe8\x2c\x60\x53\x37\xa3\xe2\x8f\x1f\x8c\xca\xb9\x7a\x83\x33\xd9\x5f\x14\xba\xb9\xd4\x9d\xb2\xa2\x6f\x2f\xae\x5a\x35\x89\x7b\x8c\xde\x3a\xd8\x44\x3e\x26\x97\x4d\x49\xe3\xa6\xbe\x3a\x36\x92\xe9\x59\xe0\xd9\xe8\x5b\x90\xb3\xa6\x23\xe7\x0c\x38\x48\x2c\xd6\x63\x86\xe9\xf7\xaf\xef\xab\x6c\x42\x95\x85\x05\xbe\xbd\xb8\xb2\xc8\x81\xfd\x77\x29\xc8\x15\x12\xe6\x55\x30\xaf\x01\xd7\x83\xa2\xdf\x21\xef\xec\xee\x29\x86\xa8\xcc\xee\x98\x25\x43\x57\xa8\x65\xda\x53\x30\xcf\x2f\x09\xcd\x73\x89\xb0\x37\xf7\xdc\xf2\x87\x91\x96\x25\xe2\x88\xfa\x8a\xba\xdd\x53\xb3\x1b\xed\xb9\xb6\x36\x39\x00\x52\xd7\x6c\x29\x79\x5b\x95\x05\x42\x3a\xf2\x0e\xfb\x80\x22\xe3\x58\x62\x7c\x21\x96\x7d\x8f\x73\xa8\x2b\x32\xc8\x11\x19\x78\x97\x88\x90\xd6\x6e\xf7\x48\x98\x04\x25\x8a\x25\x48\xcf\xb8\xaf\x02\xec\x48\x96\x3b\xfd\x75\x87\x02\x94\x15\xc7\xb8\x27\xdf\xae\x54\x6d\x97\x93\x9e\x2c\x81\x6b\x69\xd4\xa6\x1f\xad\x97\x11\xf2\xd1\xac\x41\x15\xe0\x2b\xf2\xad\xe6\x66\x6c\x09\x98\x11\xe0\x1a\xef\xd9\xaf\x10\x4b\x90\xb2\x7f\x1d\x62\x3d\xb7\xb8\x75\xdb\x93\x10\xa8\xf4\xb7\x1d\xae\x66\x80\x0e\x24\xbf\xb6\x68\xaf\x87\x2d\xde\x5e\x5c\xd9\x1b\xcf\x2e\x8f\xb1\x0e\x18\x0f\xb0\xc5\xb7\xc9\x6b\x73\x8f\x05\x9d\xbf\x5f\xbd\xfa\x79\xa8\xcf\xa8\xdb\xdd\xd5\xf9\xe8\xc3\xf3\xbe\x03\x9f\x95\xc1\x95\xb4\x02\xbf\x55\x01\x95\xd9\xbc\xef\x3a\xdf\xa3\xee\x2c\x43\x92\x0b\x44\xb1\xf7\x9c\xc8\x54\x48\x74\xd9\x8e\xf0\x5a\x2e\x84\xb8\xa9\xca\xe1\xef\x56\x37\x44\xf3\xd0\x7e\x94\xcb\xb5\xc3\x3f\xdd\xae\x35\xe5\x5c\xf5\xc5\xa5\x76\xed\x72\xd0\xd6\x1a\x47\x26\x4d\x9d\x0b\x11\xe6\x55\x32\x4f\xea\x37\x45\xa5\x34\xc8\xaf\x99\x54\xfa\xc5\x98\x7c\x4f\x0b\xe6\xca\xc5\xd9\x48\xc4\x41\xfb\x0f\x7e\x60\x7a\xfe\x57\xa1\xf4\x05\xe8\x83\x3e\x18\xb2\x0e\x93\x83\x63\x72\xe0\x06\x7b\x40\x84\x24\x07\x17\x82\xc3\xc1\xd8\x4c\xac\x07\xcb\x56\x93\x94\xfa\x6a\xac\x2f\x81\x35\x6b\xa1\x07\xdb\xda\xae\x70\x4b\xac\xa0\x80\x4c\x7b\x83\x23\xe8\x66\xbc\x16\x64\x4e\x97\xf6\x19\xe5\x6f\x14\x05\x9a\x50\xec\x9f\x82\x7c\xe7\x76\x4d\xcd\x7b\xbb\xcf\xb2\xae\x44\x65\x59\x6b\xe1\x9e\x55\xab\xb6\x78\x34\xf5\xda\xfb\xc9\xc4\xce\x2d\xef\xfb\x26\xea\x75\x36\x6c\x43\x94\x2b\x90\x4b\x96\xc1\x3b\xc6\x6f\x7a\x1c\xc8\x6e\x8e\xc8\xd9\x06\xa7\xa0\x36\x4d\xb7\x0e\x8b\xc9\xb8\x4d\x86\x35\x66\x05\x9d\x88\x4a\xe3\x6b\x09\x41\x7a\xd6\x09\xd8\xef\xf9\xca\xf8\xff\xb5\x12\x85\x09\x1d\xa5
\x6d\xcd\xd3\x72\x79\xd5\xde\xba\x5e\xb9\x58\x18\xd6\xf1\x2e\x38\xb5\xe2\x9a\xde\xe1\x7d\x24\xb2\x1b\x90\xa4\x30\x8b\x30\x26\x75\xf2\x4c\x0f\xc6\x9d\x7e\x76\xb2\x82\x1e\x98\xfe\x90\x48\x01\x94\x73\x58\x80\xa4\x45\xed\x87\x0a\x15\x83\x77\xee\x4a\xae\x39\xb6\xb2\x28\x7a\xcc\x5f\x56\x2e\x0f\x97\x29\xab\x67\xcf\xb6\xf0\x23\x8b\x5e\x17\x8e\xb9\xde\x2c\x5b\xca\x09\xdc\x31\x85\xa0\x8e\x52\xe4\xed\xaa\x55\x95\x02\x39\xaa\x6b\x95\xf5\xe0\x4e\x5d\x47\x09\x9f\x78\x93\xc3\xa4\x9a\xcd\x18\x9f\x39\x3b\x02\x6d\x95\xde\xad\x90\xda\x0d\x6b\xb0\x7d\x14\xc9\x24\x50\x6d\x2b\xd8\x94\x22\xb7\xd9\x8a\xac\x0f\xcb\xac\xf6\x8c\x2d\x44\x6e\x19\x4f\x56\xd6\x4b\xe6\x25\xd9\xac\x89\xf9\xda\x3e\xfa\xf6\x9c\x13\x21\x5d\x45\x62\x9a\xe7\xb8\xc6\x9b\x7b\xd6\xaf\x09\xfb\xda\x3e\x1d\xd7\xb9\x05\xe6\xfc\xaa\x86\x7f\x90\x88\xa9\x6a\x62\x9e\x1c\x95\xcc\xd6\x7b\xa0\xae\xf7\x32\xed\xc1\x74\x77\x27\xde\x88\xee\xa5\x67\x9b\xa7\xb3\x3f\x98\x38\xc4\x48\xeb\x6d\xa0\x75\xed\x73\xbe\x65\xe4\xb8\xbc\xbd\x4c\xbe\x96\xec\x98\x15\x74\xcd\x16\x69\x8e\xf9\x72\xb0\x28\x85\xa4\x92\xf5\x0c\x20\x1b\x19\x5d\xd3\x01\xc6\xaa\x0b\x3e\xfc\xf6\xf8\x2f\x99\x79\x39\x6e\xd3\x00\xf5\x3a\x84\xcd\x5c\x59\x7b\x83\x0b\xe2\x25\xd6\x58\x70\x2a\x9b\x43\x5e\x15\x7d\x5f\x7b\xb3\x8a\x4a\xca\x35\x80\xaa\x53\x9d\x57\x2e\xf1\xd5\xaa\x85\x3a\x65\xb7\x5f\x20\xc4\x28\x27\xe4\x05\x77\x4c\x63\x7b\x39\xf3\x1b\xd4\x52\x36\x11\x18\x5f\x16\xbd\x78\x0a\xb9\x96\x3f\xbc\x5d\xa9\xf4\x5b\x54\x5a\x29\x63\x64\x7b\xc5\x0f\x77\x19\x18\xb3\x40\xab\x66\x71\x5d\x06\x07\x13\xbc\x5f\xca\x86\xd7\x9d\x4e\x46\x61\xc9\x32\x1c\xf9\xb6\xcb\xab\x17\x63\xc3\xd0\xed\x8e\x15\xfc\xc9\xca\x85\x87\x8a\x46\x75\x07\x0d\x15\xd5\x3c\xb9\x36\xfb\x56\x97\xff\xc1\x6f\x58\x32\xda\x1b\xaf\xb2\x53\x2d\xb7\x95\x6d\xbf\x25\xed\x4a\x68\xd7\x6d\xe6\xc7\x4f\x04\xef\x99\x40\x80\x53\xdc\xdd\x56\xd0\xb7\x09\xec\xc5\xd3\x65\xb6\xde\xa3\xac\x7b\xb7\x60\x0d\xf4\x64\x85\x00\x3d\x7b\xc3\x34\x43\x5d\x65\x54\xce\xe2\xdc\x8e\x07\xa7\x72\x56\x2d\x30\xeb\xd2\x39\x57\x9a\x8e\xd4\x61\x79\xc3\xb9\x35\xd7\x31\xd2\x68\x9e\x05\x6f\xde\xbf\xc5\x8e\x53\xca\xe8\x85\xfe\xdd\x07\xb5\x93\x2a\x73\x60\x5d\x73\xd2\xdc\x3c\xf1\x37\xd0\x00\xbd\x39\xb7\xd0\x03\x3b\x70\x00\xbd\x59\x76\xd2\xda\x3b\x10\x00\xa3\x6e\xeb\x57\x51\x7f\xbe\xb5\x95\xe9\xfc\xba\x2e\x06\x5d\xcf\xbe\x37\x47\x77\xe8\x19\x2f\xcd\x5b\x10\xdf\xb8\x4d\x14\x9a\x67\x73\xca\x67\x21\x7e\x53\x23\x00\xed\x75\x75\xaf\x38\xd7\x64\x16\x54\x46\x43\xf2\x3e\x6c\x1e\x28\xc9\x45\x65\xb6\xfc\x8b\x2f\x8e\x09\x83\xd7\xe4\x8b\xd6\x17\xf5\x1f\xe9\x99\x1d\x4b\x1b\x4e\xd2\xaa\x57\x31\x69\xc4\xa1\x7f\x9e\xa1\x84\x19\x95\x79\x81\xb5\xee\xa6\xf5\x13\x1c\xf1\x3c\xa1\x02\x80\xc6\x15\x66\xaa\x72\xa1\xc7\xf1\xf1\xf8\x7d\x4b\x40\x6b\xaa\x6e\xd4\x89\x7d\xee\x8f\x72\xaa\xe9\x88\x96\xd6\x43\xca\x04\x3f\xb1\x01\x8a\x91\x6b\x7c\x38\xa2\x4e\x95\x8c\xea\x63\x70\xf2\x5b\x59\x61\x0f\xdf\x11\xad\xff\x8a\xf1\x11\x1d\x61\xcb\xbc\xb0\xcc\xfa\x5f\x31\x05\x22\x38\x06\x10\xd8\x03\x74\xdd\x05\xe4\x15\xb1\x9d\xf3\x98\x5c\x04\x00\x94\xeb\xae\xd7\x75\x82\x9f\x6b\x57\xd8\xe8\xea\xfe\xca\xc3\xeb\xf6\xb3\x8b\xeb\x8f\x7f\xbb\xfc\x70\x7e\x71\xdd\x52\xf1\x51\x45\xc4\x92\x8a\x4f\x2a\x3e\xa9\xf8\xa4\xe2\x9f\xbd\x8a\x07\xbe\x8c\x52\xef\xb5\x6b\x77\x9b\xd7\xbc\xff\x34\x3a\x8d\xe4\x5a\x59\xf3\xb5\x18\xf5\x66\xf9\x49\xe6\x29\x9e\xf1\xe5\xf7\x54\x12\x09\xa5\x04\x85\xaf\x9a\x20\x7c\xff\xb6\x4d\x21\x8e\x65\x58\x02\x0e\x6d\x40\xa9\x9f\x50\xa2\xe2\x13\x24\x1b\x0e\x94\x2c\xe4\x53\x7c\xec\x8b\x76\x28\xf0\x79\xdd\xe0\xc0\xec\xe7\xcf\xe7\x6f\xcf\x2e\xae\xcf\xbf\x3e\x3f\xfb\xf8\x24\x99\x0b\x11\xed\xda\xba\x86\xe6\x10\x96\x96\xa5\xfb\xed\xad\x40\xa6\xb6\x7
[crds/zz_defs.go: go-bindata generated file (see the //go:generate directive in crds/defs.go); the embedded gzip-compressed CRD manifest payload is a machine-generated raw byte literal and is elided here]
x63\x98\xe1\x43\x1f\x4c\x71\x5b\x51\x97\x52\x41\x01\xce\x6b\x61\x0c\x5f\x88\x0b\x42\x0e\xf8\x2e\x4f\x16\xd2\xc0\xe1\xf0\xc5\x5f\x32\xb8\xae\x05\xb8\xb3\x9d\xa1\xd9\x41\xdb\x09\xd8\xf6\xde\xe7\xb2\x12\xbf\xb7\x6d\x97\x75\x5b\x4b\x6b\x29\x25\x57\x06\x5b\x1a\x40\x35\xea\x3a\x07\xe1\xfe\xbe\x49\xa2\x71\x7f\x1d\x26\x89\x93\x73\xbf\xcc\x19\x8c\x2a\x47\xb8\x37\xc1\x2b\x9c\xd5\x52\xcc\xd9\x5c\x2a\x5e\x78\x34\xf7\x11\x92\xf6\x72\xac\x98\x32\x46\xd4\xa4\x66\x6e\x1e\xdb\x1b\xd6\x75\xca\x7e\xf4\x0b\x6b\xeb\x46\x39\xeb\x8e\xd8\x6e\x5b\x30\xa5\x73\xc1\xe4\x9c\x2d\x00\x35\x5e\x63\xf1\xd9\x57\xcf\xfe\xfc\x07\x36\x5b\x39\xd3\x3d\x5a\xee\xd5\xd2\x1d\x2a\xcb\x8b\xf6\x10\x14\x42\x2d\xdc\x59\xc5\x27\x92\x82\x9c\xef\x75\x27\x09\x27\x0a\x1a\x5a\xe3\x46\x3d\xff\xf2\x7a\x46\x36\xe8\xc0\x1f\x3a\xce\xc5\xcd\x71\xef\xfc\x4e\x0a\x4d\xa8\xd0\xdd\xec\x56\x4e\x2b\x17\x23\xb8\xc8\x5b\x54\x0d\xb4\x79\x4c\x52\x36\x81\xbd\x9c\x2d\xf5\x2d\x9c\x95\xde\x6f\x89\x5e\x9c\x70\x18\xba\xfa\xaf\x4a\x57\x4d\x81\x55\x85\xaf\x24\x21\x9c\x0a\x27\xa1\xf1\xf6\xe7\x80\xc6\x66\xab\xde\xa5\xd8\x21\x61\x8a\x6b\x56\x33\x5e\xed\xf0\x49\xd1\x82\xb5\xe7\x02\xf0\x39\x95\x96\x80\xbc\xa9\xc5\x94\xbd\xe2\x45\x31\xe3\xd9\xf5\x95\xfe\x4e\x2f\xcc\x5b\x75\x56\xd7\x84\x54\xfe\x60\x6d\x0a\xee\xac\xb2\x65\xa3\xae\xb1\x23\x33\xd5\x88\x28\xf4\x82\xe9\xc6\x56\x8d\x0d\xb5\xc4\x5b\x54\x7e\xbc\x6a\x93\x48\xa3\x88\xc6\xa4\x37\xf9\x7a\x8b\x2d\xde\x4b\x8a\x7a\x83\x12\x7e\xae\x98\x70\xeb\x87\x45\x56\xfd\xf9\x9b\xa0\x40\x28\xc7\xe2\xcb\x67\x5f\xfd\x09\xd5\x23\xd3\x35\xfb\xd3\x33\x28\x8e\x33\x47\xf8\x90\x3a\x9b\x85\xf2\xce\x99\x92\x17\x85\xa8\x87\x69\x31\x77\x31\xa6\x5e\xb1\xc4\x6b\xf8\x4e\x11\x3d\x98\x1e\xb2\x69\x2a\xe7\xde\x42\x1f\x57\x57\xff\x05\x71\x0f\x69\x8d\x28\xe6\x04\x5b\xb7\x30\xba\x6b\xe2\xb3\x0f\x46\xea\xbe\xc7\xfc\x3a\x9f\xec\x73\x0a\x28\xdc\xe8\xa2\x29\xc5\xa9\xb8\x91\x59\x7c\x6a\x66\xb0\x5d\x03\x49\x81\x26\xa9\x90\x84\x14\x8b\x9e\xb3\x59\xa1\xb3\x6b\x96\x7b\x61\x1d\x9c\xda\x5b\x2a\x74\xa3\xa1\x57\xb5\x0e\x84\x21\xa1\xbe\x9c\xb6\xe2\x14\x38\x3b\x11\xc6\x7e\xe7\x5a\x27\x01\xd8\x39\x2b\x79\x55\xb5\xe4\x08\x35\xbf\x1d\x2c\x3d\x41\xa2\xd3\xb5\x52\xf5\xfd\xc1\xf8\xeb\x90\x94\xe0\xa4\xa7\x37\x27\xfe\xab\x49\x16\x02\x11\xdf\x9e\x96\x17\xed\xe6\x4b\x4b\xe8\x0c\x0e\x55\x27\x2c\xad\x4e\xbc\x02\x09\x58\x2c\xbc\xce\x29\x45\xcd\x68\x06\x7e\x18\x9c\x63\x6b\xf8\xbb\xc3\x4b\xae\xb1\xb0\x9a\x96\x3c\x49\xcc\x5d\xd1\x6b\x08\x06\xbb\x05\xf9\x67\xc8\x0d\x96\xdc\xa2\xcb\x4e\x5c\x87\x40\x65\xc7\x59\x25\x6a\x23\x8d\xb3\x9d\x7f\x00\x05\xf3\xb2\xe0\x92\x96\x0e\x6a\xd3\x03\x94\xae\xf6\x2c\x65\x91\x51\x35\x42\x47\xbd\xb4\xf7\xec\x42\xe7\x5e\x18\x3c\x40\xd8\x4d\x51\x2a\x02\x5f\xf1\x1a\x51\xc0\xa0\xe0\xff\x91\xcc\xb9\xc7\x7a\xb8\x7e\xe8\x76\x27\xf5\xdd\x72\x32\xda\x87\x0b\xe5\xb6\x8f\x0f\x41\xe2\xe7\xf8\x5c\xc1\x1a\x7c\x3e\xaf\x55\x3b\xdd\x11\xd4\x1f\x3c\x53\x7e\xbb\xc7\x78\x61\xba\xc8\xfd\x52\xf8\x6b\xdf\x85\x27\x88\x32\x7d\xb4\x72\xca\x10\xce\xa8\xb4\x0d\xd3\x64\xfb\x2f\xf6\x1f\xe5\xf1\xc1\x2d\xa8\x75\xc5\x17\xe0\xa9\x8f\xb0\x13\xeb\x22\xc9\xb8\x21\x0c\x20\x08\x03\x21\x26\x90\x8a\x18\xf5\xca\xcb\x26\x3f\xf4\x90\x45\x0e\xa5\x0b\x1e\xb3\xe5\x03\x0a\xd4\x5e\x28\xa8\xd4\x91\x4a\xee\x96\xaf\x18\xaf\x75\xa3\xf2\x29\xe6\x17\x29\x0a\x1d\x07\xa4\xbe\x5f\xaf\x2d\xe8\x1b\xad\xc8\x98\xea\x40\x85\x3e\x64\x38\x06\xc7\x87\xcc\x3f\xfa\x7c\xfa\xfc\xd9\xe7\x6c\x39\xc1\x5a\x8c\x62\x39\xbd\x69\x2d\x27\x7c\x7f\x1e\x65\x55\x42\xcf\xdd\x11\x56\xe6\xb5\x4f\xaf\xb4\xed\x71\xa9\xb6\x5f\x68\x24\x09\x82\x6e\x6b\x69\xfd\x5d\xb9\x95\xd1\x29\xb7\x30\x0e\x20\x30\xc2\x74\xdd\x67\x7a\x3d\x4c\x2e\x44\x4a\xe9\x1f\x9e\xda
\x93\x8f\x31\xd3\xcc\xee\xe9\x3d\xc4\x27\x2c\x45\x61\x6e\xcb\x04\x9a\x34\xb9\x5d\xbc\xbf\x7d\x1a\xd3\xb7\x6f\x6f\x8f\x1d\xe0\xac\xf6\x0d\x30\x74\x1d\x3e\xca\x35\xf4\x5b\x79\xf6\xbe\x22\x76\x5e\x19\x6c\xe7\xd9\xfb\x8a\x43\xee\xb1\xea\xf6\x35\xe1\xa1\xf2\x06\xcd\xdd\xfb\x4a\x94\xbd\x6e\x28\x75\xfb\xfa\x8d\x58\xf2\x1b\x32\x6b\x81\x91\xa5\x2c\x78\x5d\xac\xdc\x06\x5f\xe2\xca\xb2\x59\x63\x99\x50\x37\xb2\xd6\xaa\x14\x64\x9e\xeb\x1b\x5e\x4b\xe0\x2a\xaf\x05\x10\x8d\x66\xc2\xb0\xdf\x1e\xfc\x70\xf2\x0e\x40\xb5\x54\xc2\x31\x67\xa7\x88\xb0\x67\x8d\x81\x72\xc9\x51\x56\xb8\xf7\xc1\xc3\x6c\xc0\x1e\x95\x53\x65\xfd\xb2\x84\xf5\x75\x27\x97\xfa\xf5\x2a\x6f\x77\xc9\xad\x44\xd9\xd8\x86\x17\x40\x40\x97\x15\x8d\x91\x37\x0f\xfd\x32\x7a\xfa\xc0\x53\x19\x7d\x17\xd7\x68\x13\x3b\xc5\xe7\x45\x76\xcc\x82\x24\x72\x5f\xc0\x37\xde\xd1\xf3\x30\x00\xf0\x08\x67\x25\xb4\x41\x0f\x71\x31\x67\xf4\xf9\x14\x14\xd2\xd4\x52\x02\x0c\x99\x56\x73\xb9\x68\x6a\x64\xff\x5e\xeb\x69\x5c\xf2\x05\x61\x53\x37\xb3\xe2\xf7\x9f\x8c\x52\x3a\x17\x71\x0c\xfe\xc3\x12\x06\xff\xe3\x98\x32\xe8\x51\x61\x99\x6c\x29\xf2\x26\x2a\x57\x8d\xd4\xce\x3a\x67\x5a\x59\xcd\x78\xdb\xe3\x09\xe6\x08\x50\x2c\x60\x6a\x8c\x90\xa8\xb4\x9a\x40\xb2\x14\xcf\x52\x98\x53\x1d\x18\x20\xc3\x3f\xc4\xc8\x1c\xce\x12\x42\xaf\x6e\x7e\x47\x8c\x1b\xd3\x94\xa8\xda\xa0\xfd\x76\x84\xcc\xb9\xb4\x80\x3a\xd3\x4d\x9d\x89\x10\xf3\x70\x4a\x2d\x02\x54\x42\xdc\xfb\x4b\x51\xc0\x85\x25\xee\xff\xfe\x9b\x9e\x0c\x3c\x04\x26\xfc\x2d\x16\x6e\x15\x4a\x39\x00\x77\xda\x16\x66\x6a\x00\xae\xcc\xa5\x65\x5a\x41\x23\xf7\x3c\xea\x66\x5d\x0e\x66\xd3\x77\x6c\x50\x16\xb0\x5d\xf3\x99\x28\x62\xce\xc0\xda\xe4\x66\xdd\xd1\xca\xb1\xee\xc6\x1f\x8a\x98\x89\x46\x96\x05\x04\xf5\x03\x3e\xf0\x31\x37\x46\x2e\xd4\xa4\xd2\xf9\xc4\xfd\xde\xe3\x4f\x47\xd3\x90\xa2\x73\x3c\x0f\xbc\xa6\x17\xa4\x90\x17\xe1\xac\xea\x1b\x51\x2f\x05\x8f\x08\x04\xae\xa1\x01\xfd\xcf\xb3\x5a\x54\xb5\x30\x00\x80\x41\x36\x30\xbc\x75\x11\xb3\x0f\x53\x71\x37\x5e\x67\xc0\x0f\x80\x18\x8a\xba\x41\x26\x75\x0e\x07\x23\x8e\x57\x82\xb3\x85\xbc\x11\x8a\xbd\xc3\x57\xee\x65\xc1\x8d\x19\x04\x26\xe2\x1b\xbe\xf1\xc6\xea\x16\x39\xc4\xb8\xed\x1a\x70\x21\x00\xd7\xc3\xda\xfa\xbf\x31\x46\x7a\x2b\xcc\xbd\x7b\xb5\x46\x44\xc6\xf9\x7c\x6c\x99\x00\x78\x41\x06\xf4\xa3\x76\xe9\x63\xb5\x8a\x7f\x56\xbb\xa6\x25\x2c\x03\x10\x5f\x78\xb1\xa2\xc0\x7b\x57\x6b\x1f\xb8\x7d\xda\x91\xed\x08\x6a\xe1\x6e\xe0\xb6\xa9\x79\xcd\xe5\x1c\x86\xc8\x60\x4c\x77\x50\x8b\x5a\xf0\x7c\xe5\xbe\x1f\xb6\x68\x30\xfb\xa8\x17\xb5\x67\xf5\x40\xe5\x38\x68\xd7\xce\x04\xba\xd0\xf9\x65\x25\xb2\x18\xd0\x48\x7b\x31\x83\x75\x16\xe0\xfb\xa1\x67\xa0\x33\xdf\xa4\x8a\x0a\x6f\xb6\x06\x59\x5d\x0b\x53\x69\x95\xbb\x5b\xd9\xff\xe8\x23\x52\xc0\x45\xda\x41\x35\x43\xa3\xdc\x54\x01\x86\xd5\xf1\x08\xff\x5b\xd4\x51\x89\xd3\x2d\x6a\x7f\x21\xed\xf4\xfa\x4f\xa0\xf3\x85\x5a\x72\x95\xa1\x29\x70\x7c\x2d\x2a\x73\x6c\xe4\x02\x55\xfc\x97\xcf\x9e\xff\xf9\xd9\x97\x5f\xfe\x01\x94\x7e\xd8\xe9\x69\x19\xb3\x4c\x77\x36\x76\x73\x1f\xa2\xe7\xec\xef\xed\xfb\x13\x21\xf4\xe6\xf9\xf4\xf9\x1f\x8e\x7e\xa1\xc5\x1b\x3c\x91\x11\x42\x7b\x4d\xdd\x2e\x74\xde\x1e\x99\x80\x4e\x79\xe2\xcf\x5d\xc5\xad\x15\xb5\x7a\xc1\xfe\xfb\xe0\x1f\x5f\x7c\x98\x1c\x7e\x7d\x70\xf0\xd3\xb3\xc9\x9f\x7f\xfe\xe2\xe0\x1f\x53\xf8\xc3\xef\x0e\xbf\x3e\xfc\x10\xfe\xf2\xc5\xe1\xe1\xc1\xc1\x4f\x7f\x7f\xfd\xed\xd5\xc5\xd9\xcf\xf2\xf0\xc3\x4f\xaa\x29\xaf\xf1\x6f\x1f\x0e\x7e\x12\x67\x3f\x7f\xa2\x90\xc3\xc3\xaf\x7f\x1b\xa3\x7b\x63\x0b\x5f\xe9\xc5\xae\xe4\x02\xd7\x91\x8a\x5a\xab\x5a\x38\xaf\x40\x6a\x15\x0b\xfd\x1d\x06\x15\xd7\xc4\x04\x54\x17\xfe\x2d\xd2\x9c\x0c\x73\x52\x0b\x67\x3c\x18\x34\x28\x0a\x7d\x0b\x05\x2
3\x52\xd7\xd2\x46\xb9\xd4\x6f\xa1\x09\x05\x7b\x23\x6e\x44\x7d\x14\x66\xfa\x9d\x13\x77\x11\xa4\x51\x82\x8b\x56\x6f\x95\xe5\x3b\x8c\xb9\xd7\x85\xd4\xac\x6a\x5d\xfb\x8c\xd5\x1e\x72\xbd\x21\xe4\x1b\xad\x2e\xda\x75\x6e\x27\x1f\x8d\x71\x23\x9c\xde\xb0\x87\xc4\xa3\x76\x05\xa5\xcd\x7e\xbe\xf0\x2a\x4e\xd9\x0f\xbc\x96\xba\x31\x0c\xb1\x12\x51\x6f\x78\x59\x69\x05\x16\x30\xe2\x9e\xdb\x57\x00\x1c\x2d\x0f\x2b\x0e\xbf\x2e\xc6\xde\x08\x44\x04\x21\xd5\xd7\x2e\xf1\x49\x6b\x21\xbd\x6c\x2d\xa4\xb8\xf3\xd1\x9a\x80\xd2\xba\xbb\x72\x13\xa6\x5f\x87\x32\x58\xec\xe9\x1c\xeb\xc8\xb7\x2d\x4e\xc5\x76\x2b\x2e\xd8\xcf\xd1\x11\x02\x5c\x50\x98\x5a\x58\x05\xb0\x3c\xde\xf0\x52\xe0\xef\x5b\xca\xc5\x32\x6a\x15\x5a\x8b\x08\xa3\x19\xf8\xf3\x83\xbd\x8a\x3d\xc2\xb1\xaa\x9b\x52\xea\x5d\xad\x7f\x3d\xf1\x0a\x0c\x3b\x95\xb5\xcd\xd4\xc2\x89\x8b\x0a\x0b\xb6\xeb\xc5\xf6\xf0\xf6\x80\x49\x35\xc9\x6a\x69\x65\xc6\x8b\x3d\xd0\x40\xe1\x7f\xca\x8a\xc6\x58\x51\x77\xff\x6b\xcc\xa3\x5a\x0b\x66\x6f\x35\xce\x9c\x17\xec\x5a\xac\x6e\x75\x9d\x07\x9b\x3e\x7c\x45\xe4\x21\x80\x9d\x37\x36\x7c\x86\x14\xa6\x6b\xc3\xe7\xb6\x48\xd4\x6c\x26\x62\x2f\xc3\x86\xd8\xd5\x94\x9d\xa8\x95\xcf\xe2\xb7\x78\xba\x08\x99\xb3\xd6\x74\x77\xda\x1b\xbc\x1a\xf4\x8e\x07\x17\x22\x1e\x19\xe4\xbf\x95\x23\xe5\xfb\xf4\x8e\xc0\x71\x1c\xb4\xae\x55\xae\xc1\x0b\x09\x31\x63\x5d\x33\xec\xe6\x01\xba\x2d\xaa\xd3\x16\x14\xec\x07\x41\xf7\xfa\xba\x38\xef\x4e\x2a\x61\xcc\xb7\xee\x4a\x50\x43\x24\xc3\x1b\xc6\xc1\xd1\xf1\x72\x23\xbe\x7a\x01\xb7\x32\x2c\xa3\x70\x0a\x0b\x7c\x23\x30\x72\x74\xde\xc9\x8c\x31\x67\x4e\xe0\x47\xa1\x34\xd7\xf9\xb1\xb7\xee\x71\x71\x13\x94\xd6\xf4\x2b\xde\xe2\xfa\x15\xa2\xac\x93\x37\xa7\xa1\xf9\x3f\x5a\xf9\x66\xd8\xd7\x33\xce\xe5\x6e\xbf\xce\xaf\x03\x78\xed\xbe\x08\x4a\xfc\xab\xe1\x05\x24\xa7\xae\xea\x46\xc4\xa8\x11\x92\x93\x28\xec\xad\xae\xaf\x8f\x9f\x3d\x7b\xf6\x47\xf0\x11\xe1\x73\xff\xd7\x97\xdf\xfc\xaf\x2f\xbf\x99\x96\x79\xac\xcb\x14\x07\x89\x8c\x06\x43\xae\x63\x4b\xdf\xf5\x4f\x74\xd8\xe2\x78\xd8\x75\x9b\xc5\x04\x62\x24\x38\x44\xed\x3e\x47\xc7\x2b\xa3\x15\x15\x05\xbc\x38\xe9\x26\x78\xb5\xaa\x62\xbe\x96\x8a\x3e\x1c\xfc\xbe\xd4\x84\x60\x27\x09\xd7\xde\x30\x02\x22\x98\x77\x73\xea\x01\xa5\xf7\x0d\x69\xef\x70\x14\xd2\x58\x7c\x38\x20\xf8\x0f\x69\x9a\x55\x45\xac\x6b\x89\xd4\xcd\x40\x26\x90\xe4\x69\xee\xbf\x43\x21\xac\x42\x47\xd3\xe9\x52\xaf\xb3\xbc\xea\x8b\x7c\x3e\xbb\x55\x9d\x06\x57\xf1\x04\xb8\x5f\x8e\xd8\x5b\xf5\x0a\x0b\x2c\x63\x82\x78\xe0\x67\xb6\x1e\xa5\xbb\x6c\x28\x6e\x54\xb6\xa3\xe3\xdf\xf8\xb5\x9c\xe0\x32\xc4\xaa\xb0\xb8\x6d\xeb\x45\x0b\x13\xac\xd6\xfd\x77\x6b\x72\xba\x6b\x11\x85\xc3\x1e\x44\x6c\x51\x11\x85\xab\x01\x89\x26\x7c\x0a\xd8\xa2\xd6\x4d\x15\xb3\x71\x68\x87\x76\x08\x95\xd0\xa0\x16\x1b\xa7\x63\xa6\x73\xca\xd8\x79\x4c\x3f\x4b\xa5\x87\xd3\x6d\x13\x9b\x70\xf5\xbc\xc9\xee\x4c\xb7\x18\x03\x2d\xc3\xb0\x6d\xc8\xba\x21\xe9\x05\x46\xf5\xeb\x46\x81\x11\x08\x11\x88\x18\x77\xb5\x66\xbd\xe4\xf0\x5e\x21\x16\x3c\x5b\xed\x0d\xe7\x1e\x19\xc5\x5f\x03\x17\x60\xf7\x57\x59\x62\x33\x3c\xfc\x06\xb8\x7f\x51\x71\x39\x5f\xef\x0b\xb6\x34\x2a\x45\x30\x7e\x1b\xe3\x97\x32\x8f\x66\xf9\xf0\x87\x9b\x2d\xb9\xca\x21\x6d\x43\x8e\x42\x7b\x49\x13\xf8\x34\x42\xf4\x79\xa3\xd2\x30\x35\xf4\xfc\x55\x74\x30\x38\x4a\x2d\xb4\x10\x82\xd1\x3c\xd9\xc1\x89\x8e\xca\xef\xe5\xd2\x54\x70\xa3\x30\x3a\xd6\x1a\xad\xed\x1c\x83\x6b\x14\x21\xf3\xee\x89\x0d\x7f\x5d\x94\x69\x87\x8f\x42\x37\xad\x7b\xdd\xa0\x61\x1b\x7e\xaa\xda\x5e\xeb\xe6\xcf\x96\xba\xc8\x41\x19\x62\x88\x32\x66\x45\xbd\x24\xc6\xad\xad\xe5\xac\x71\x5e\x01\x57\x39\xf4\x31\xed\x93\x4b\x45\x89\x84\x58\x97\x99\xb2\x8e\xe3\xa6\x0f\x7e\x03\x7d\x31\x65\xec\x52\x44\x45\x
16\x9c\xd1\xd6\x5b\x05\xb0\x32\xc2\xe6\x41\xdc\x09\x6e\xa6\x88\x23\xeb\xc2\x00\xdb\x3d\x27\x68\x68\xa6\xef\xdc\x7c\xeb\x9e\xcb\x38\x93\x77\x70\x4c\xf6\x4e\xda\xc8\x8e\x69\x2a\x4f\x83\xcc\x0b\x7c\x86\x63\x4d\x4b\xa7\xd3\x79\x55\x15\x12\x2b\xea\x86\xf6\x1d\x23\xd4\x6d\xc1\xd3\x7d\xa9\xcb\x16\xe0\xea\xd6\xd6\x60\xe7\x32\xb8\xdd\x4e\xc9\x16\x51\x4f\x26\xc3\x1c\x40\xb6\x04\x12\x57\x28\xf2\xb8\x75\xd3\x5b\xca\x0a\xa3\xbf\xdc\xd2\x30\xd0\x88\xa2\x71\xc2\xda\x3e\xe7\x95\xce\x5f\xb0\x7f\x28\xf6\x1c\xc3\xa5\xfa\x56\xc5\x43\x8f\xbf\x3d\x3f\x6d\xf5\x97\x93\xf9\xea\x12\xb6\x9c\x7d\x89\x32\x8d\xb0\x0b\x19\x5b\xe2\x32\xc3\x7e\xc1\x46\x58\x76\xa0\xc4\x2d\xd6\x2b\xfa\xa4\x3d\x21\x62\xc0\x3a\xa0\x70\x98\x69\xbb\x0c\x7e\xba\x87\xec\xf7\xb1\x8e\x0a\x24\x0c\x44\x1d\x22\xda\x33\xe9\xab\x85\xde\xbe\xdb\x0f\x60\x95\xdb\x49\x7d\x3b\x99\x4c\x26\x91\x92\xff\xa1\x5a\x5b\xeb\xa8\x7f\x8a\x3a\x93\xac\xd4\xb9\x9c\xc7\x16\x47\x0c\xcf\x92\x53\x90\xdd\xf4\x41\xef\x70\x15\x2b\x12\x57\x75\x1a\x87\x04\xa6\xb3\xa1\xf6\x82\xe3\x7f\xf8\x2a\xce\x18\x3b\x49\xd6\x44\x6e\xbf\xdd\x61\x6f\x4d\x77\xc1\x84\xb2\xf5\xaa\xd2\x32\x1a\x0b\xbe\x5e\x95\x1e\x18\xeb\xa6\xec\x7b\x67\x72\xd2\x88\x30\xc3\x43\xd2\x65\x0a\x5f\xf3\x15\x72\x85\xb4\xc0\x9c\x48\x91\x6b\x6f\x34\xb8\x29\x01\xe2\x03\xac\x41\x6b\xff\x07\x91\xe2\xdd\x19\xbc\x80\x86\x25\x7d\x21\x47\x3d\x60\x4a\x6b\x2c\x45\x4a\x96\x6a\x7d\x6a\xd0\x85\xdf\xb0\xaa\x16\x99\xc8\x85\x8a\xa6\x96\x40\x40\x22\xb7\xd4\x3a\xdd\x47\x3a\xf5\x6f\xb4\x7a\xa7\x75\x24\x99\xfa\x56\xe2\x2d\xd3\x91\x20\x50\x6b\x5d\x01\x28\xe6\x2e\x0f\x37\x80\x0d\x55\x13\x68\x68\xdf\x98\x80\x6b\x23\xf0\x89\x6e\xe8\xc7\x1b\x5e\xc8\x3c\xd0\x64\x01\x4c\x3c\xf6\x54\xda\xd6\x7b\x73\x96\x9e\x32\xce\x67\xf2\x88\x63\x96\x6b\x61\x08\xf5\x95\xfe\xa3\xbf\x3f\x3f\x65\xcf\xd8\x01\xd4\x3d\xb4\x4c\x5b\xc8\x31\xca\xa3\xbb\x1a\xac\x01\xe2\xe7\x61\x7a\x9d\xa7\xce\xa2\xc9\xba\x90\x7c\x93\x29\x8d\x4c\x81\x7e\x2d\xdd\xfb\x16\x5e\x4e\xdf\xc7\x27\x1e\x39\xbf\xa9\x8c\xb6\xaa\x97\x48\xa9\x1f\x51\x46\x77\xa9\x17\xc2\x32\xaf\x29\xa3\x2d\x73\x8f\x15\xba\xa6\x8c\x28\xba\x24\xbe\xea\x0f\x34\xc2\xf7\x46\x44\x16\xa5\x6c\x3c\x83\xdf\xdf\xfb\x33\xd8\xf3\xb5\x22\x65\x3a\x65\x32\xdc\x29\xd0\x01\xac\x14\x96\x03\xe1\xae\x8c\x3d\x62\xbd\x72\xcf\x6d\x4f\xea\xee\x14\xb7\xa7\xf8\x33\x7c\x24\x8d\xf8\x4e\xaa\xe6\x3d\x7a\xf9\x91\x69\x9d\x8d\x7b\x71\x79\x06\xc2\x90\xff\xf1\xbd\x25\xf1\xb6\xa2\x7f\x9a\x6f\xfa\xa7\x41\xad\xb7\xe1\xa3\xa4\xe7\x22\xbc\x70\x9e\x94\x1e\xa9\xe9\x62\xa7\xca\x6a\xae\x72\x5d\x6e\x7c\xb7\x3b\x06\x82\x67\xcb\x3e\xeb\x99\xbb\x39\xb1\xf2\x77\xf7\xcc\x0b\xbd\x9f\x7b\x46\x20\x8c\xa1\xf3\xaf\x40\x50\x2f\x91\xbc\xe7\x3b\xc0\x2e\x4a\xd3\x1e\xb8\xd8\x48\x61\x3b\x19\x3e\x13\xc5\x46\x3c\x68\x70\x43\xa8\x2c\x3c\xa4\xfa\xed\x5a\x17\x04\x62\x89\xc1\xda\xbc\xd3\x85\x2f\x11\x0c\x8b\xe3\x84\xfe\x0a\xd6\xc6\x12\x72\xe6\xeb\xba\x79\x55\xad\xad\x8d\x8d\x4b\xfc\x87\xf1\xd4\xd6\xa6\x89\xb6\xe5\xd8\xfa\xda\x38\x73\x70\xb8\x36\x4e\xe8\x67\xbe\x36\xfd\x50\x31\xc4\x7d\x52\x5e\xf6\x13\x84\x36\xe8\x39\x06\x9d\x4d\x78\xa2\xe3\xe3\xaf\x5d\x5b\x0a\x6f\xe8\x82\x09\x2d\xd5\xda\x63\x19\xfb\xe6\x48\xd5\xd6\x4f\x6c\x2c\xf9\xbe\x7b\x37\x64\xc9\xeb\xd8\xc7\xf7\xdb\xf3\x53\x0c\xf7\xf4\x4d\x0e\xe7\x18\xfa\x45\x88\x4c\xdb\x32\x4c\xdd\xf2\xdc\xf3\xd9\x73\xb5\x4a\x7b\xbc\xe2\x09\xe4\x48\xf4\x71\x29\xad\x9b\xc8\x36\xe9\xca\x64\xb6\x48\x39\xb2\x97\x28\x01\x72\x6a\x8c\xb7\xe7\x57\xf1\x52\x98\x8a\x67\xd1\x67\xd7\xcf\x08\xb1\x0b\xbd\xd2\xdc\xa9\xb3\x98\xa2\xf3\xed\x0c\x9d\xaa\xb6\x31\x66\x90\x7e\xe0\xd3\x12\xd4\xf0\x92\xb7\x69\x0f\x91\x71\xa0\x8d\xad\x14\xbc\x51\xb1\xad\x7a\x1e\xf4\x8c\x6d\xd9\x3a\x0f\
xeb\x75\x6a\xf9\x5a\xd4\x8a\x60\xe4\x54\xbc\xe6\xa5\x80\x26\x0a\xc1\x88\x26\x1d\x7d\x12\xab\x20\x95\x53\x90\xc4\x0a\x38\x41\x83\x3b\x36\x75\x97\xc0\x24\x48\xe5\x01\x1b\xf2\x3a\x74\xac\xa7\x38\x15\x1a\xd9\x95\xd5\x84\xad\x65\xa9\x74\x43\xb0\xe2\xc9\x4b\xf0\x03\x38\x4a\x9f\xdf\x1a\xdc\x4a\x95\xeb\x5b\x33\x4e\xd0\xe0\x47\x14\xd6\x71\x71\x04\xec\xc1\x53\x0a\x1c\x68\xfc\xd4\x2d\xe4\xda\xfb\x26\xd1\xb9\xed\x53\xc6\x4c\x7f\x45\xf9\xa5\x27\xef\x91\x2f\x4a\xc3\x5f\xd6\x6e\xb2\x56\xf2\xe2\xb2\x12\x59\xa2\x2b\xf1\xed\xeb\xcb\x93\xa1\x40\x1a\x6b\xe4\xed\x52\xd4\x98\xbe\x71\x12\x7b\x55\x63\xb7\x62\xb6\xd4\xfa\x9a\x20\xf2\xa0\x87\xb9\x5b\x36\xb3\x69\xa6\xcb\x1e\x48\x76\x62\xe4\xc2\x1c\xfb\x5b\x3d\x71\xeb\x42\xa1\xe1\x92\x0a\x3a\x6f\x6c\x76\x80\xf1\x1f\x42\x10\x99\xb5\xab\x09\x27\x15\x21\x9d\x01\xbd\xb1\xb9\xdc\xc4\x36\xbf\x6d\x85\xde\x1d\xb5\xea\x04\x91\xe3\xd6\xa6\x87\xd1\xab\x33\xf5\x5a\x13\x0e\x48\xc0\x35\xce\x0b\x4e\x68\x94\x94\xf0\x0e\x6e\xde\xa0\x38\xcc\x62\x18\xbf\x70\x8b\x88\xdb\x2a\x3b\x18\x70\xff\x18\xf6\x0e\x15\x41\x28\x1c\x43\xab\x9d\xb6\xde\x9d\x18\x42\xa0\x2f\x64\xe0\x46\x38\x28\x90\x89\xf3\xa2\xdc\x83\xe4\x3f\x90\x62\x06\x6d\xcb\xe5\x8d\xd1\x3a\x61\x5b\x3e\x0f\x84\x0e\xb3\x74\x24\x65\xbb\x99\xd7\x1b\x64\xea\x08\x32\xb7\xe4\xf6\x36\xad\x07\x8a\xe0\x8f\x1b\x35\x44\x0e\xe8\xfb\x31\x6c\xd8\x7d\x18\x37\x38\x36\x29\xa0\x7b\x64\x04\x65\x74\x76\x88\xf9\xfa\x57\x69\xa0\x6c\xbd\xab\xcf\xf5\xf7\xe0\x5d\xff\xaa\x51\x5e\xc5\x87\xd6\x11\x4e\xd5\xc9\x4c\x9c\x64\x99\x6e\x14\x19\x56\x7d\x2a\xdc\x2e\x71\x2b\xf2\xcb\x81\xbc\x38\x53\x8c\xb3\x1c\xe4\x20\xf5\x16\x2f\x24\x37\x10\xf7\x19\xca\x84\x4a\xfb\x08\xa9\xdd\xdc\x20\xee\xbc\xf6\xc5\x5e\x8f\x19\x2b\x78\x3c\x9c\x39\x61\xa5\x53\x6a\x8f\x36\xd7\xa3\xff\xec\x46\x2c\x8d\x57\xb6\x6b\x7b\x86\x0f\x6d\xbf\x5a\x28\x42\x24\xc4\xe7\x3e\xad\x38\xcc\x72\x73\xdd\x11\xde\x09\x28\x66\x6d\xf5\x7e\xef\xdf\xfd\xc2\x4d\x38\xce\x2f\x9a\x04\x2f\x6e\x97\x96\xbc\x16\x17\xf8\x82\xbc\x09\x91\x4b\xf2\x46\x39\x61\x5d\xfb\xee\x10\x7f\x6f\x23\xa2\x11\xeb\x3a\x13\xf6\x56\xf8\xea\xe8\xf5\x37\x12\x51\xe4\xb0\xf4\x11\x12\x81\xc9\xc3\xfa\x62\x1d\xf7\x52\x6c\x69\x31\xea\x94\x5c\x84\x48\xab\xd9\x8d\x14\xb7\x48\x21\x26\x17\x8a\x17\xbd\xae\xc8\xc0\x99\x01\xac\x03\x11\x12\x87\xdf\x08\x64\x9f\xee\xb8\x57\x3a\x6f\x7b\x63\x63\x72\x23\xe6\x88\xfa\x6d\xd8\x48\x81\x0c\xaa\xcf\x90\x7a\x31\xea\x29\xbb\x38\x3f\x65\xcf\xa7\xec\x6f\xda\x58\xf7\x47\xe0\x07\xde\x76\x9c\x62\xbe\xdf\x53\xc7\xba\x17\x1c\x0d\x84\x2d\xc5\x21\x71\x78\x25\x24\x7e\x8f\xbd\x44\x71\xb8\x2f\xd3\xcc\x72\x5d\x72\x19\xd1\x69\xe3\x17\x4a\xa9\xe6\x4d\x51\xac\xd8\xbf\x1a\x5e\xc4\x1a\x18\x17\x3a\x87\x16\x18\xe0\x92\x84\x83\xbd\xf7\x97\xf0\x4f\x7f\x9d\xfe\xa5\x9d\xed\x5f\xa7\x7f\x89\xa3\x97\x68\x6f\xf2\x5f\xa7\xe6\x26\x9b\xfe\xc5\x73\x8a\x30\x2f\x6e\x6f\xc4\x4a\x2d\x77\x0c\x80\x75\x80\x7b\xe9\x91\xba\x9e\x5b\xa7\x3b\xee\xb5\x46\xab\xd7\x0b\xf5\xdb\x9a\x67\xe2\x42\xd4\x12\xec\x45\xad\x72\x2a\x7b\x45\x38\xec\x2c\xf7\x9c\xa8\xee\xda\x1a\x14\x19\xa3\x99\xfc\x52\x2a\x21\x72\xf4\x07\xfc\x5c\x05\x5b\xb8\xa9\xc2\xf1\x9a\x46\xc2\x85\x80\x4e\x24\xab\x05\x37\x88\xb7\xc9\x45\x21\x3a\x82\xc7\xa9\x8f\x34\xc7\xb3\xab\x28\xad\x26\x4a\x2c\xb8\x95\x37\x22\x64\x01\xb1\xb0\x05\xcd\xec\x7f\x8b\x3a\xe6\xc2\x77\x9c\x3a\x7e\x82\xb2\x2c\x45\xee\x8c\x2c\xf7\xc9\xa1\x13\x7e\x6c\x3a\x43\x1a\xa6\x64\x71\xd4\xaf\x54\xc5\xa5\x64\x15\x6c\x3b\xa1\xf0\xb1\xc1\x75\x44\x5b\x0c\xbe\x77\x20\xd0\xdb\x38\xe1\x20\x44\xad\x40\x38\x32\x8c\xcf\x6d\x4b\xae\x14\x5e\xa6\xc0\xef\x1a\x55\xa9\x10\x4e\x94\x7b\xe1\x8d\x50\x96\xf1\x41\x2f\x60\xff\xfc\xc5\x79\x59\xd0\x50\x18\x00\x72\xf8\x36\xf7\x27
\xe9\x7e\xcf\x5c\xd7\x99\x9c\x45\x35\x28\x59\xf2\xa2\xe5\xb1\xe5\xec\xda\xed\x09\x4e\x6d\xca\x2e\x85\xa5\x6d\x7d\xa1\xd5\x22\xf4\x49\x07\xf7\xfd\x7d\x85\x6c\xa1\x99\x7b\x22\x9a\x0a\xbf\x21\x8e\x27\x77\xa5\x9b\xed\x48\x5c\xf6\xfb\x67\x61\xf3\x1e\x90\x06\xeb\x93\x53\xe7\x56\x17\x02\xcf\x23\x55\xc3\x6d\x2d\x1b\xde\x37\x7d\xc9\xb1\x1f\xfe\x90\xa4\x2e\x57\x81\x2f\xdb\x9d\xa4\xab\x76\xce\xb1\x41\x78\x6e\x2d\x87\x1a\x64\xa7\x9b\x51\x0a\x14\xd4\xae\x98\xb3\xcd\x2c\x42\x7c\x3c\xd1\x40\x5c\x8e\xc3\xdd\xa9\x5a\x56\x85\x60\x7f\xb9\x16\xab\x23\x64\x5a\x13\xf3\xb9\xc8\xec\x5f\xbb\xa6\x15\x51\x32\x5b\xae\x11\x5d\xb9\x89\xea\x9a\xfd\x25\xfc\xe9\xaf\x31\x26\x38\x31\xff\x42\xcd\xbe\xe0\x47\x27\x01\x05\xce\x40\xc4\x1a\x4d\x1b\xec\x50\x74\xb0\x02\x67\x03\x4d\x26\xdd\x72\x4e\xd9\x19\xd0\x21\x94\x82\x2b\x83\xff\x14\x2d\xd2\x39\x47\x78\x5c\x50\xb8\xf1\x7c\x85\xe4\x2c\x24\xc3\xaa\x5a\x91\x87\x42\x69\xa7\x85\xdf\xe8\x4b\x5f\xf2\x0e\x0c\x9c\x73\x51\x77\xff\x12\x2f\x5f\xe5\xec\x8d\x3e\x7b\x2f\xb2\xc6\x3e\x04\x6d\x0d\x8e\x6b\x91\xd6\xa4\xfc\xef\xa2\x65\x45\xc5\xf5\xbe\x16\x2b\x5a\xb4\x1c\x44\x74\x5a\xa3\x03\xf1\x0d\xce\x43\xb4\x58\xdf\x0a\xa0\x3d\x0f\xd7\x62\x65\x5a\x16\xf3\x6b\x98\x7d\xfc\x81\x45\x52\x91\xf6\xce\x87\xbe\x06\x67\xef\xa5\xb1\xe6\xff\xc4\x46\x4a\x70\x64\xba\x9c\x05\x9b\x01\x8f\x7e\xb8\x10\x30\x7b\x3c\x76\xa4\x53\xe5\x7e\x1c\x3e\xfb\xa1\x0e\x55\x58\x98\xa4\x93\xf5\x36\xac\x6e\x8f\xcf\x9f\xbb\xef\x20\xf4\x87\xa9\x45\x81\xef\xe7\x52\x56\x03\xea\x71\x30\xcd\x65\xde\xce\x98\xb0\xc0\x75\xd8\x77\x58\xeb\x33\xe7\x98\xa6\x14\x10\xb1\x20\xc2\x0b\x95\x40\x2b\x27\x6f\x78\x21\x30\x22\x77\x2b\x8b\x3c\xe3\x75\x7c\x64\x7d\xae\xeb\xc0\x2e\x6a\x7c\x5f\x13\x4f\x98\xe6\xac\x37\xff\xd2\xd2\x35\xad\xf1\x48\x1a\x5e\x5b\x99\x35\x05\xaf\x99\x7b\x17\x16\x84\x4e\x41\xe4\x73\xd7\xa9\x8f\x68\x57\x13\xc7\xd0\x9e\x59\x97\xd6\x3b\x89\x24\xed\xe6\xbd\x17\x3d\x47\xb3\x78\xa8\xf0\xa2\x25\x1e\xf4\xfa\x9d\xcc\x20\xc5\xea\xdf\xd1\xf6\x11\x89\x7f\xe3\x5a\xce\xfc\x3e\xc5\xad\x34\x4c\x2e\x20\x23\x7a\xd8\x19\x64\x34\xf5\xee\xce\xc9\x94\x7d\xb3\x0a\x7e\xe2\x11\x36\xfc\x41\x76\x4f\x11\x5d\xed\x12\xd8\xad\x82\xba\xc4\xb9\xf5\x1e\xa3\xb9\xae\xc5\x0d\x21\x75\x78\x90\x6b\x98\x93\xb8\x91\x99\x3d\x9c\xb2\xff\xbf\xa8\x35\x5c\xef\xe0\x8c\xd3\x10\x6b\x5d\x38\xb5\xd7\x4f\xe0\x19\x3b\x80\x5f\x13\x2d\xb2\xe7\xbc\x1f\x06\x50\x86\x6f\xab\x4d\xba\x70\x0f\x8b\x32\x26\x82\xf8\xb6\x00\xf8\x06\xe6\x07\x05\x8b\xb9\x61\x7b\xb4\x34\x66\x3a\x58\x0a\xf1\x37\x29\xbc\x5e\xd2\x78\x4d\x3e\x48\x8f\x02\x1d\x5b\xb4\xd0\x99\x68\x4d\x8f\xf6\xa2\xfe\x8f\xbb\xff\x9c\xd5\x62\xe1\x74\x6e\xb4\x48\xd4\xb2\x0f\xa0\xa1\xad\xae\x74\xa1\x17\xab\xcb\xaa\x16\x3c\x7f\xa9\x95\xb1\x35\x3c\x1b\x54\xc2\xf4\xbb\xe4\x91\xfa\xaa\x2f\xf5\x2d\xe3\x58\x5b\xe0\x34\x29\x10\xf4\xeb\x66\xb1\xc4\x4e\x67\xf0\x2b\x18\xcf\x6a\x1d\x45\x94\x1b\x3e\xd9\x87\x69\xcd\x94\x5d\xb6\xfd\xc9\x40\x11\x10\x5a\xa8\xc1\xcc\x20\xc9\x73\xcb\x57\x5e\xfd\xf1\x99\xcc\x85\x61\x1d\x7c\x3d\x2c\x45\x5c\xcf\x9b\x3b\x5b\x85\x6c\x42\x63\x62\xa2\x98\x2b\xe6\x63\xe1\xa6\xcf\xd6\x8f\x77\xf6\xec\x46\xa8\x0b\x9d\x1b\xdc\xc4\x98\xd4\x85\x67\xf4\x67\x27\xce\xfc\xb8\xf3\x28\xc4\xd1\x05\x9f\xbc\x39\x8d\x41\x6e\x3c\x42\x84\xe5\x8e\x0f\x6d\xdd\xda\xb8\x67\xd9\x1d\xfa\xee\x78\xb7\xa1\x0c\x38\x62\xbc\xd4\x84\x70\x08\x76\xb8\x0a\xfb\xf1\x00\x01\x10\x1a\x01\x6f\xc9\xdf\x5f\x5e\x8b\xdb\xa8\x9f\x09\x1f\xf5\x77\x11\x13\x74\x9d\x40\x00\xf7\x7b\x65\xb8\x95\x66\x2e\x23\xb3\xa8\xd4\xf0\x0e\xd4\xc2\xc5\xf7\xfd\xc3\x31\x2c\x79\xed\x4b\x72\x5a\xc0\xd3\x87\xc6\x3b\x1f\x52\xad\x1d\x31\x5f\xbc\xd3\x85\xf3\x08\xaf\xb6\xf4\xad\xfd\xba\x96\x84\xce\x1d\xcb\x7c\x1b\x6
f\xc2\x34\xdb\x2e\xfc\x08\xa2\x68\xca\x99\xa8\xdb\x07\x01\xf3\xce\x32\xfe\x91\x1d\xb6\xa9\x5a\x7b\x16\x68\x6f\x2f\xa9\x2e\x26\xa5\xe8\x04\xb6\xe8\xec\xbd\x73\x80\x4c\x7c\xd9\x01\x8e\xc1\xd1\x5a\x17\x48\x6d\xf1\xdf\xd5\x97\xad\x1d\x05\x5a\x97\xcd\xfe\xb8\x5a\x0e\x7b\x75\xc2\xe9\x8a\x7c\x25\xba\x41\x2d\xea\xc2\x41\x2a\xed\xc2\xb1\x5e\x4e\x3a\x58\x26\x92\x44\xd6\x5f\x96\xb5\xa6\xa0\x44\xa8\x2c\xeb\x30\x1e\x3e\x0f\x65\x8e\x30\xde\x73\x44\x44\x28\x32\xe4\xd6\x6d\x2d\x71\xd0\x34\x10\x09\x12\xf1\x3c\xf3\xdd\xb8\x16\x2b\x30\x89\x70\x8e\x29\x8d\x9d\x49\x77\x18\x07\xb5\xc2\x0d\xc7\xc4\x7d\x04\xf1\x27\xc3\x72\x92\x7e\x3c\x45\x01\xe1\x20\xc4\xab\xbb\x31\xb8\x08\xd7\x5d\xe4\x1a\x6e\x04\x59\x2a\x6b\x63\xde\xe8\x82\xb7\x4f\x11\x46\xb0\x13\xe4\xda\xa8\x46\x83\x6b\x3f\x9b\xd6\xce\x9f\x25\x84\x71\xbb\x31\x58\x70\xbd\x19\xd0\x4d\x58\x1b\x1f\x0a\x5e\x0f\xed\x26\x49\x04\xce\xb2\x79\xb8\xd9\xc9\xa1\xe1\x9e\xe8\x5a\xb0\x73\x75\xc4\xde\x68\xeb\xfe\xd3\xc5\x8b\x13\x64\x9e\x6a\x61\xde\x68\x0b\xb2\x1e\xf5\x98\xe0\x72\x8d\x74\x48\x7c\x9c\x0c\x78\xd5\x93\x16\xbc\xe6\x2b\xb7\x97\xf8\x65\x49\xca\x1a\x87\xcf\x18\xf5\xe3\x3a\xe7\x2a\x9e\xe1\xae\x3f\xfc\x69\x68\xc3\x42\x86\x6c\x1c\x84\x11\x82\xc1\x6d\x5b\x74\x72\xfc\xaa\x1b\x9b\x91\x2c\xa6\xeb\xc1\xe9\x8b\x0f\xdb\x76\x63\xfd\xe3\xc3\x27\x24\x88\xf4\x1f\x0e\x01\x0d\x94\x09\x6d\x85\xaa\x82\xc0\x3c\xd0\x1f\x79\x53\x63\x77\x2b\xe7\x75\x5b\xb1\x90\x19\x2b\x45\x1d\xcd\xef\xd8\x1f\x40\xb7\x9e\x7a\x79\xd3\x0e\x4d\x82\x5d\xd9\x9f\x04\x51\x83\x80\x13\x00\x4e\xe6\x48\x0e\x05\xca\x42\x93\xb4\xe4\xb1\x14\xdd\x38\xf4\x9c\xfd\x3f\x2d\x40\xe4\xff\x65\x15\x97\xb5\x99\xb2\x13\x0f\xde\x26\x89\xec\xcb\xf3\xf0\xe5\xde\x74\x49\x22\x4b\x5e\x6d\x26\x07\xb9\x62\x02\x99\x67\xa8\x5f\xbe\xee\x96\x1d\xb1\xdb\xa5\x36\x82\x6c\x31\xb6\x21\xc5\xbd\x6b\xb1\xda\x3b\x5a\x57\xa2\x24\x99\x7b\xe7\x6a\xaf\x43\x7d\x0f\xd4\x47\x70\x21\x68\x9f\xaf\x8a\x15\xdb\x03\x79\x7b\xd3\x0d\xef\x8f\xe6\x9d\xa6\x7b\x8c\x64\x37\x21\xb5\xab\xf1\xfa\x44\x08\x77\xdc\x47\xda\x92\x62\x50\xfb\xaf\x51\x48\x17\xd8\x27\xbd\x68\xb9\x58\xd4\x02\x8a\x59\x30\x6a\x0e\x21\x9d\x92\xaf\xa0\xf8\x5f\x89\x1b\xa1\xa2\xe0\x94\x5e\xa8\x34\xbe\xe7\x43\x3e\x65\xe7\x76\x7f\xdf\xf8\xbb\xfd\x5e\x96\x4d\x89\x0c\xe7\xd6\x12\x9e\x9d\x5c\xce\x43\x63\xb3\x50\xf0\x31\x8c\x48\x85\x58\x5a\xb4\xe4\x36\x83\xa0\x56\xd0\xb9\x72\x3d\x4b\xc1\x74\x2c\xad\xa1\x33\x9d\x87\x91\x5f\x6c\xbd\xc5\x5e\xe9\x9a\x89\xf7\xbc\xac\x0a\x42\x2e\x1a\x92\x1c\xbf\x9f\xfc\x5b\x2b\x11\x32\x08\x47\x2c\x1c\x05\x2c\x54\x89\x8f\x18\x6a\xf6\x1c\x15\x47\xd7\x82\xb9\x2d\x25\x19\x44\x4d\xe3\x13\x69\x3e\x49\x64\xd8\xf3\xe3\xe7\xc7\xcf\x5e\xb0\x0f\xcc\x4d\xfd\xb9\xff\xef\x97\xec\x43\xb4\x48\xf7\x73\xbf\x67\x1f\xd8\x07\xc6\xd8\x05\x63\x83\xff\xba\x11\x2f\x71\xc2\xe4\xbc\xbf\x86\xcf\x8f\x98\x54\x99\x2e\x7d\x4c\x96\x65\x04\xa3\x1b\x94\xe6\x4c\xb4\xc9\x2c\x00\x6e\xe2\xd4\x81\xfa\x27\xd3\x84\xfa\x47\xb7\x86\xcf\xff\x4f\x90\x09\x90\x6c\xcb\xb4\xf2\x92\x9f\x1f\xc0\x92\xc6\xb3\x24\xdc\x42\x17\xac\x92\x5f\x63\x7c\xf7\x24\xb3\x0d\x2f\xdc\x62\x1c\x7c\x39\x79\x76\xc8\x08\x28\x8c\xfe\x74\xd8\x8d\xd4\x05\xb7\x22\xac\xf0\xc1\xf3\xc3\x29\x8b\xed\xbe\xc0\xd6\xb6\xe8\xcb\x11\xb6\x68\xb0\x3b\xb0\x8a\xee\xf2\xbb\x49\x7b\xa5\x45\xb8\xf0\x21\x02\x15\xe8\x1b\x4e\xfb\x7d\x64\x28\x0f\xfb\x73\xb8\x97\xcf\x02\x1c\xc4\xc3\x2d\x23\xca\x93\x70\x8c\x84\x65\xf8\xe4\xee\xc7\xfe\xb7\x76\xd9\xa1\x44\xcc\x51\x2b\x27\x04\xa7\x28\x86\x97\x9e\x43\xd3\x38\x54\x69\x66\xca\xde\xe8\x3c\x10\xe8\x63\xe5\x50\xb4\x44\x0c\x1a\x7b\x85\x29\x4d\x1b\x05\x95\xc0\xe5\x90\x11\xc8\x1c\x7a\x38\xda\x4c\x2b\x23\x73\x51\xa3\xee\x98\x09\x5a\x8f\x15\x86\x6a\xbc\x
cd\x3e\xb2\x1f\x3b\xc9\x58\xdf\x07\xb0\x6f\xda\x44\xff\x8a\x4d\x03\xf6\x66\x4d\x76\x2d\x6c\xb0\x3e\xeb\x95\x9b\x70\xd5\xc4\xbf\x44\x33\x5e\x70\x95\x89\x7c\x33\xc7\x64\x75\x6c\x07\x26\x2f\x11\x66\x06\x17\xda\x6c\xde\xcf\xfb\x07\x78\xe0\xd8\x48\x77\xa6\x99\x7e\x3f\xae\x8b\xeb\x00\xe7\xd1\x0b\xe4\xd3\xdd\xb9\xe0\x45\xa8\x82\x81\x9e\xc3\x6d\xe7\x02\xb5\xbf\x1f\xbf\x91\x38\x37\x8f\xbe\x42\x1b\xa0\x43\x60\x4c\xd9\x84\x9d\xea\x37\xda\x92\x51\xe1\x07\x1e\x29\x77\xc8\xac\x28\x0a\xd4\x08\x6d\x9f\x35\x42\x27\x08\x64\x2b\xf3\x02\xdc\x87\x4f\x5a\x50\xca\x89\x5a\xdd\xf2\x15\xfe\x1e\x12\x82\xaa\x9b\x17\xb4\x94\xe8\x23\x5c\x98\x8c\x9f\x28\x9a\xd3\x3d\xdd\xc0\x0d\xdb\x1b\x1c\x86\xb8\xc6\x3f\x0c\xdf\x56\x77\x71\xc1\x66\x91\x73\x56\x15\x3c\xc3\x7a\xaf\xee\x85\x25\x18\x40\x68\x4b\x07\xdb\xd7\x5b\x01\x86\xed\xf9\x67\x7c\x2f\xde\xf7\xeb\x5b\xce\x5b\x2d\xe1\x68\x89\x43\xcb\x79\x9b\x25\x4c\xdb\xf3\x0d\xcb\xb9\x67\x09\xff\xde\x59\x71\xf1\x1e\xe7\x86\xe5\xdc\xb3\x84\x2f\xdc\xff\x23\x48\x5c\xb7\xa0\xcf\xe7\x6c\x43\xb7\xc4\x9f\xa6\x76\x2d\x07\x97\xbc\xb3\xd8\x28\xce\x19\xc0\xb1\xef\x34\xaa\xbf\x04\x5b\xf3\xf7\xf1\xa6\x6f\x6b\x8b\xb3\xdf\x1f\x7f\x79\xfc\xfc\xc0\xed\xcd\x97\x87\x6e\x97\x06\x56\xf0\xf3\x78\xc9\x5a\x0d\x66\xe6\x35\xa2\x14\xa6\x6f\x07\xc7\xdf\x7e\x85\x20\x47\x76\xab\xeb\xdc\x83\x27\x43\x51\xb5\x5b\x21\xd0\x2f\x14\x4b\x58\x96\xe1\xed\x3d\x62\xb3\xa6\xd7\xb5\x92\xdd\x6a\xd2\x0b\x00\xee\x84\xb4\xec\x77\xa5\xae\xc5\xef\x7a\xf2\x43\x44\x20\xde\xda\x5a\x7b\xbc\x69\x36\x70\xe4\xeb\xfd\x7e\xd2\x63\xa4\x2b\xa4\xb1\x93\x92\x57\x93\x6b\xb1\x8a\x88\x19\xd1\x60\x52\x29\x20\xa9\xcd\x59\xe3\xc7\x7f\x7a\xec\x17\x9b\xc8\x51\x61\xa8\xfb\xdf\x79\xf0\x89\x17\xe3\xfb\x22\x70\x15\x97\xc3\x28\x3d\x64\x69\xb6\xea\x13\x5d\xcc\x44\xa1\xd5\x02\x31\x43\x2d\xe7\x71\x84\xd4\xc8\xd6\xdc\xc6\xea\x9a\x2f\xc4\xb1\xff\x94\x58\x62\x88\x87\x44\x42\xfe\x80\xfd\x14\x07\xa5\x41\xc8\x4e\x48\x68\x4e\x19\x38\x53\x02\x16\x0d\x34\x2f\xcf\xa0\xdc\x1a\xb6\x64\xc0\x16\x1e\x2b\x9a\xb0\x6f\x0f\x0a\x86\x8c\x64\x8f\xa0\x42\x38\xf8\xad\x39\x2b\xb8\xb1\x32\xfb\xa6\xd0\xd9\xf5\xa5\xd5\x75\xa2\x6f\x70\xf2\xe3\xe5\x86\xc4\x14\x6c\x01\x57\xec\xe4\xc7\x4b\x76\x2a\xcd\x75\xd7\xd1\x1c\x1b\xaf\x99\xb6\x2a\x99\xf2\xee\x72\x76\x8d\x1d\xe2\xdc\x63\xb0\xd4\xc6\x9d\xb1\x6c\x29\x95\x08\x29\x8c\xf8\x88\x8e\x78\x5f\x69\xdf\xc8\xbd\xe5\x42\x4f\xbb\xed\xbf\xe1\xb7\x46\xe0\x72\xce\xdc\x72\xba\xff\x59\xd0\x1e\x9e\x07\xe6\xec\xc6\x0f\x38\x3f\x8d\xfc\xc1\x14\x2c\xd2\xdc\x5c\x91\xda\x82\xac\x1f\xe1\x57\xb2\x10\x58\xb2\x83\xcd\x99\x09\xa1\x77\x16\x92\xe8\xbe\x03\xb0\x3b\xaf\x2b\xdd\xb0\x5b\x8e\x19\x41\x78\x59\x88\xf0\x47\x59\xbd\x60\x67\xbd\x06\x84\xc8\xb1\x14\xa6\x4c\xce\x24\x81\xdd\xdc\xd2\xef\xfb\xb2\x05\x77\x2f\x68\x49\x3b\x48\x26\xba\x47\xd2\x17\x3f\xb1\x33\x74\x9c\xcc\x0b\xb6\x27\xde\xdb\xaf\xf6\x68\xf8\x84\xbd\xf7\x73\xb3\x77\xc4\xf6\x94\x9d\x9b\xbd\x29\x3b\xf7\x3d\xfe\x9d\xe3\xa8\xe6\xa2\x8e\xab\x82\xe8\x06\x06\xb8\x70\x62\xeb\xa4\x91\x70\x85\x49\x52\xc7\xbd\xf6\xb4\xd3\xf2\xf6\xf4\xed\x0b\x08\xb2\xe4\x9a\xdd\x0a\x56\xd5\xe2\x46\x28\xcb\x44\x5d\x53\xc1\x53\xfe\xf5\xec\x8e\x1c\x52\x87\x65\xba\xac\x6a\x5d\x4a\x43\xc5\x2c\x61\x8a\x0e\x54\x70\xac\x86\x63\xa9\x88\x07\xa8\x8f\x85\xeb\x9f\xac\x3d\x80\x70\x23\x88\xa3\xc6\x4c\x59\xd7\x9d\x7a\x4c\xdd\x71\x3e\x67\x1a\x53\x9f\x43\x62\x20\x49\x2a\x89\x66\xc1\x44\x76\xda\xc2\xcf\xd6\xd9\x2d\xbd\xbb\x4e\x92\xf9\x4a\xd7\x41\xdc\x71\x2e\x6e\x8e\x4d\xce\x9f\x1f\xc1\x12\xe0\xa5\x24\xf6\x03\x18\xec\x0b\x37\x6c\xef\xf9\xde\x94\x5d\xca\x52\x16\xbc\x2e\x08\x11\x68\x36\xd4\xf1\x9d\xec\xb9\xae\xdb\x89\xd3\x6e\x98\x61\x7b\xcf\xf6\xd8\x81\xae\xe1\xab\
x9d\xdf\x52\x08\x7e\x23\x02\xe5\x10\xbd\x29\x02\xa0\xb0\x0e\xa3\x5d\x57\x96\x98\xc2\x61\x49\x69\x1c\x06\x86\x08\xcf\xdf\xaa\x82\x04\x33\x5e\x23\x7d\xc4\x33\xc4\xf6\x6c\xdd\x88\x3d\xea\xb9\x9f\x6b\x67\x87\x02\x91\xa2\xc0\x27\xf8\x9d\x9f\x61\xda\x06\x49\xe5\xbd\xa8\xd7\xee\x62\x01\xdb\x04\x4e\x74\x1a\xdf\xd8\x10\xc7\x5d\x17\x7e\x0f\x88\x06\x09\x51\x50\x96\xec\xc3\x8e\x62\xd5\xb2\x84\x9e\xb7\x38\x82\x89\x9a\x7e\xa6\xbe\x57\xf2\x5f\x8d\x60\xe7\xa7\x81\xfb\xb3\x12\xb5\x91\xc6\x52\x11\x5f\xf9\xc0\xdb\x91\xe8\x02\x1d\x9c\x94\xfc\xdf\x5a\xb1\xb3\x6f\x2e\x13\xde\x93\xc3\x27\xe1\x92\xb0\x94\x07\x9b\xff\xbb\xa9\x85\x73\x08\x93\x3c\xd5\x93\x20\x65\x10\xaf\x88\x3f\x45\x20\x87\x9d\x72\xcb\xd1\x49\xc5\x17\x51\x2b\xba\xfd\xec\xb4\xca\x0c\x2a\xf3\x02\xdf\x2f\x21\x5a\xc1\x1e\xcb\xf1\x73\x67\x97\xc0\xae\x8d\x3f\xf8\xfd\xbb\xf3\x07\x74\x18\x33\xb0\x36\x17\xaf\x75\x3e\x82\xd7\xf8\x37\x6d\x2c\x7b\x89\x12\x59\x49\x14\xc9\xd8\x1b\xad\xc4\x11\x3c\x26\xcc\xbd\x26\xfe\x8f\x3f\xd6\xd2\xc6\x90\xc2\x76\x23\xc9\x2c\x0e\x7b\x99\xbc\x3a\xce\x28\x7e\xd3\xeb\x39\x91\x73\x4b\x33\x8d\x40\x33\x7a\x0f\x64\x56\xe8\x19\xf3\xda\xe9\x31\x56\xe6\xfb\x77\xe7\xa3\x2c\xcc\xf7\xef\xce\xdb\x45\x01\xc1\x29\x6e\xd9\x63\x2e\xca\x48\x01\x98\xf5\xf8\x4b\x8a\x6b\x32\x65\xaf\x7d\xf5\x05\xef\x7b\xac\xc4\x56\xaf\x6c\x7b\x84\xa4\x8b\x76\xd0\x64\xb6\x11\x92\x69\x88\x8d\xac\xc5\x39\x68\xce\xde\x66\x6c\x64\x10\xe7\xa0\x9d\xb2\xe4\x86\x1a\x49\x27\xec\x5a\x2a\x52\x75\xe5\x50\x55\x9f\x05\x42\x54\x8f\x76\x02\x16\x70\x62\xd5\x66\xd9\x14\x16\xd8\x2b\xe1\xe2\xb9\xdb\x6b\x9c\xf5\x97\x70\x05\x19\xf3\x4c\xfa\x8c\x9d\x0a\x84\xd5\xe4\x2f\x02\x47\xbd\xfb\x2d\x74\xc5\xd9\x9b\x58\xf7\x4b\x5e\x73\xc5\x17\xd4\xcf\x07\x5b\x8c\x95\x28\xa2\xd3\x60\xec\x40\x53\x80\xdb\x0c\xf5\x58\x10\xc7\x6f\xb8\x2c\xf8\x4c\x16\xd2\xae\x9c\x9f\x45\x48\x58\x33\xd8\xfc\x8e\xad\x16\xfa\x07\xe4\x0f\xfe\x90\x8e\xe6\xbe\xf6\x99\x77\xc1\x81\x63\x07\x4e\xf6\xf1\xad\x33\x10\x88\xeb\xd3\x7a\xae\xd0\x62\x0d\x38\x70\xd0\xbd\xed\xbb\xb5\x34\xcd\x86\x5d\x0b\xd7\xbd\x5a\xba\xda\xa0\xb8\x7a\x70\x44\xdd\x9b\x92\xee\x2e\x38\x29\xa3\xb8\x0b\x20\xc8\xf7\xf5\xf8\x8f\xf7\x18\x8c\xc8\x6a\x61\x49\x3e\x03\x5c\x67\xc2\x4f\xa6\x78\x0d\xbb\xbb\xfc\x78\x77\x19\x27\x12\x8e\x4b\xf2\x0e\xf4\x5b\xe1\xa1\x58\x62\x14\x98\xdb\x8e\x26\x03\x6f\xf8\xa5\x7f\x66\x7d\xd7\x1e\x9a\x2f\xe8\xa6\xe6\xee\x76\x1c\xbd\x51\x3b\xad\x94\x17\xab\xbd\x59\xc9\x8b\x8c\x5d\x76\x88\x0d\xb7\xc8\xdf\x90\x89\x6a\x39\x4f\xa3\x1a\x7d\x29\xaa\xe5\xab\xcb\x21\x9c\xc5\xfd\x5b\xf4\x67\xbc\xba\xdc\xd4\xf1\x78\x68\x60\x95\x29\x58\x34\xe4\xa0\x2f\xe4\x5c\x58\x19\xbd\xb0\x8f\xa2\xe5\x4b\xad\x24\x81\xb0\x21\x89\x1d\xc9\xff\xca\x74\x8f\xe1\x5d\xf8\x6c\xf6\x9a\xf6\x19\x38\xa0\x00\x3a\xd3\x45\x21\x32\xc8\x14\xe9\x39\x1c\x28\xea\xda\xe0\xd8\x12\xce\xf5\x20\x65\x33\xbd\xfe\x13\x04\x74\x7d\xe8\xf6\x18\x2f\xc5\xf1\xbb\xb3\x93\xd3\xd7\x67\xd3\x32\xff\xcd\x52\xdf\x4e\xac\x9e\x34\x46\x4c\xa4\xa5\x5b\xc5\x8f\x40\x9f\x94\x98\xee\xb5\xcb\xf4\x43\xd1\x35\x7a\xfa\xde\x00\x10\x9e\x9e\xf6\xf6\x40\xc3\x5a\x6b\x7b\xc4\x6a\x0e\x10\x5b\xbb\x24\x92\x6d\x84\x7e\x4c\x78\xb6\x6c\x2d\xc4\x51\x2f\xf1\x43\x12\x19\xd1\xd4\xad\x37\x8f\x27\xe1\x31\xed\x6f\xf4\xe3\x22\xee\xd3\x56\x23\x2d\xcd\xa6\xfa\x98\x91\x16\x0c\xae\x31\xd2\x88\x5b\x73\x3e\x0f\xad\x24\xd2\xec\x3d\x8a\x03\xc7\x3e\x72\x16\x2e\x5b\xa9\x54\x4d\x8e\x80\x02\xbb\x74\x27\xe3\x5a\xac\x18\xf0\x90\xcc\x75\x0d\xfd\x77\x69\x70\x82\x5e\x7a\xf6\x58\xd8\x0c\xb6\xe2\xb8\x31\xa2\x9e\x26\x58\xa7\x4f\x60\xeb\xa9\x36\x28\x7c\xf4\x3b\x31\x1f\x7f\xe3\xdf\x09\x5a\x36\x1d\x48\x6b\x02\x07\x80\xf7\xb3\x79\x63\x97\x58\x6f\x49\xa2\x8c\x67\xed\x97\x76
\xa7\xa7\x7f\x12\x90\x39\xe7\x73\xdd\x7a\x32\x4f\x46\x2a\x2b\x9d\x22\xfa\x2e\x6c\xe3\xec\xf4\x13\x57\x7e\xf3\x13\xb8\xbd\x22\xb3\xef\xfa\x46\xd4\x37\x52\xdc\x1e\xdf\xea\xfa\x5a\xaa\xc5\xe4\x56\xda\xe5\x04\x57\xd5\x1c\x43\x83\xc3\xe3\xdf\xc0\x7f\xc8\x33\x42\x8c\xe0\x49\x9e\xfb\xda\x9e\xc6\x88\x79\x53\x60\x95\x4b\x02\x3b\x17\xaf\xe4\x0f\xa2\x36\x52\xab\x23\x88\xdb\x1f\xb1\x46\xe6\x5f\x53\x4e\x12\x4b\x55\x24\x4e\x85\x8e\x6c\xef\x91\x69\x72\xe0\x14\xf1\x5c\x1b\xec\x77\xee\xb6\x2e\xd9\x3c\xe3\x79\x29\xd5\x53\xb8\xed\x14\x47\x5d\xaa\x3c\x7e\x77\x86\x3b\xf3\x12\x64\x0c\x3d\x75\x94\x4b\x04\x05\x75\xfd\xce\x78\x88\xaa\x02\x49\x43\xa8\x19\xa0\xd4\x39\x77\x15\x06\x9f\x64\x8e\x95\x2b\xf3\xaf\x62\x82\x5f\x31\xa9\xf2\x6e\xaf\x76\xf0\xff\xed\xe3\xbe\xe0\xff\xff\x39\xe9\xe7\x35\x80\xbe\x4f\x42\x93\x84\x7e\x04\xa0\x4f\x5d\xd0\x7b\x07\xe8\xdf\xdb\x0d\x64\x3b\x67\xf8\x13\xc6\x93\x72\x86\xef\x61\xef\xd3\xfc\xdf\x91\xbd\xa0\x4a\x4b\x45\x6b\x84\xc6\x3c\x29\x2f\x38\x2c\xf8\xaa\x84\xdc\x07\x70\x35\xf0\x9a\x97\xc2\x0a\x62\x30\xd3\x77\x32\x70\x12\x95\x6f\x3f\xf9\xb6\x12\xea\xd2\xf2\xec\x3a\x01\xff\xb6\xf3\x42\xd6\xc6\xce\x0b\x21\x8d\x24\x2d\x3e\x1e\xde\xdc\x1b\xa9\x32\x27\x77\xfe\x70\x03\x59\xa2\x3c\x4b\x4e\x10\xa9\xbc\xe1\x7c\x6f\x01\x87\xa7\xf1\xaa\x66\x5a\xcd\xe5\xe2\x35\xaf\xd2\x32\x84\x41\xca\xc0\xf5\x88\xfe\x8e\x76\x32\x21\x29\x08\x84\x78\x95\xae\x9a\x02\x3b\xe7\x11\xdc\x42\x42\xc5\xfd\xa3\x75\x31\xf1\xde\xef\x38\x38\xec\xee\x8d\x2b\x75\x2e\xd8\x4c\xd2\x4d\x9c\xc6\x08\xe7\xf3\x65\xbe\x2d\x20\x18\xf0\xce\xf0\xf6\xf3\x25\x5e\x91\xd6\x21\x40\x52\xbe\x40\x24\xfa\x8c\xcc\x3c\xff\xec\x8f\x7f\xfc\xe3\xb0\x27\xfb\xb3\x3f\x7c\xf5\xd5\x94\x9d\xca\x1a\xb8\x7f\xa8\xed\x0e\x9c\x8e\x0f\x54\x09\xdc\x2e\x81\x14\x0e\x48\x00\xa1\xa3\x25\xb1\xd2\x16\xfc\x16\xa4\xe8\x71\x06\x9d\x67\x04\x2f\xe5\x62\x49\x26\x18\x77\x3a\x4b\xab\x79\x21\x33\x8b\x14\x62\xf8\x80\x68\x38\x08\xd4\x20\x0d\x0f\xdf\xd9\x16\xbb\xc2\x79\x3a\x62\x85\xbc\xa6\x4d\x73\x6e\xbe\xad\x75\x53\x75\x14\xc9\xb5\x30\x4d\x41\xa0\x25\xe9\x06\x7e\x68\x7b\xce\xdd\xa2\x7e\x86\xb5\x7d\xe4\x2c\xf0\x7a\x9f\xfe\x9e\x53\x78\x44\xa3\xeb\x63\xd8\x37\x64\x82\x57\xb3\xe2\xb2\x0e\xb8\x7a\x28\xf0\x01\x63\x84\xb6\x53\x03\xc3\x2d\x13\x79\xef\xed\xb8\xa5\x70\x26\x31\xf4\x88\xab\x5a\xff\x0f\x62\x99\x81\xa3\xb0\xf7\x92\x13\xd3\xd1\x18\xa7\xf0\xf4\xe2\x80\x8d\xea\x18\x2f\xc9\x1a\xca\xf9\x07\xbe\x27\x51\xaf\xc9\xf3\xf9\x3c\xa5\xf3\x3b\xf3\xb1\xd4\x42\x1a\xf7\xf9\xd7\x62\xd5\x35\x72\x6d\x57\x85\xa8\x4f\xfc\x4a\xb6\xb3\x03\xfd\x67\xe8\x2d\x8f\x1a\xb5\x31\x4b\xa7\x48\x61\xa6\x60\x35\x90\xcb\xb9\x79\xe8\x98\xd3\xcd\x15\x89\xb4\x89\x91\x64\x37\x2d\x3f\xa7\x70\xf2\xdb\x83\x9a\x5c\x22\x6d\x84\x6d\xf0\xb8\x23\xff\x80\x5b\x17\x11\xd5\xbc\xb3\x1b\xd8\xa2\xb8\xe4\xf5\xb5\xc8\xbd\xaa\xe7\xc5\x94\x5d\xb8\x8d\x82\x76\x15\xd4\x1b\x85\xad\x6b\x6e\x10\x78\x57\xf2\x15\x2c\x89\x77\x70\xc9\x0b\xb0\x3f\x9d\xee\xe3\x23\xaa\x6b\x66\x2c\xaf\xfd\x4b\xe5\xfe\xfd\xf3\xed\x5a\xf6\x9a\x57\x06\x3b\x6f\x38\xcf\x9f\xda\x8b\xc0\xb3\xf1\xc0\xea\x78\x93\x83\xfb\x13\xf3\x1f\xd7\xc7\xcb\x2d\x02\xe9\x47\x9f\x52\x0f\xaf\x2b\xff\x5a\x58\x1d\x14\xf1\xa3\x76\x42\xa2\xd7\x62\xb2\x5f\x70\x2e\xc8\x42\x19\x9a\x6b\x9d\x8b\xe1\x1b\xbb\x53\x18\xf9\xbb\x51\x6e\xf7\x2c\x12\x24\x3e\x43\xf6\x6f\xf0\x2f\xce\xe7\xd8\x10\x3e\xed\xc5\xc6\xd1\x7b\x13\x7a\xbe\x1f\xdd\x10\xc2\x31\x13\x10\x0e\x59\x73\x29\x18\x51\x6d\xe3\x18\xdb\xb1\xc0\x71\xb7\x7b\x91\x20\xd4\x39\x26\x77\x39\x19\x29\x2b\x00\xee\xc9\x68\xae\x06\x8e\x54\x87\x03\x47\x9a\xdb\x81\x83\x0a\x05\xc5\xb1\xa1\xf7\x82\xf5\x90\xb0\xe0\x68\x26\xcc\xbb\x83\x61\x35\xb9\x69\x12\x0e\xdb\x2a\xe4\x29\x7b\xe
d\x2d\x1a\xa7\x27\x92\x7a\xb9\xcd\x8c\x2e\x1a\x8b\xa1\x81\x4e\x6c\x8a\xa1\xd4\x4d\x16\x96\xc0\xb7\x4c\x42\x1b\xc9\xfd\x82\x04\xa1\xa0\xb9\x3a\xb3\x0b\x8c\xfb\x94\x37\xc5\x0d\xaa\xed\x86\x23\xe9\x61\xa3\x66\x15\xee\x2b\xa3\xf0\xb4\xb2\x09\xf7\x90\x49\x18\x2f\x8b\x90\xb4\xef\xc1\xcf\x49\xaf\xc9\xf1\x24\x48\xb7\x4b\xe1\xb1\xe6\x34\x8d\xd5\x05\x31\x74\xcd\xdc\xb3\x00\x0e\x6e\x4a\xb7\xc0\x5c\xcc\xa5\x22\x39\xee\xf4\x2c\x6b\x66\x64\x5a\x1a\xe0\xf2\x9c\x1d\xbc\x0c\x54\xb4\xa1\xf4\x2b\xfa\x13\xce\x95\x15\xf5\x9c\x67\xe2\xb0\x9f\x4e\x08\xf5\xda\xa4\x0e\xce\xd2\xb0\x25\x57\x79\x11\x18\x73\x99\x78\x6f\x45\xad\x78\x01\x73\xce\x6b\x79\x43\x78\x7e\x0f\x4e\x8a\x6a\xc9\xd9\x5c\x70\xdb\xd4\xf1\x85\x8a\x8f\xc3\x4a\x43\xf9\xd4\xa4\x9c\x06\xfc\xc2\xf4\x1a\x51\x10\x13\x02\x67\x91\xac\xc0\xdd\xf0\x5a\xbe\xdb\xf4\xd0\x76\xc6\x9d\x0d\x6a\x88\x5c\x9a\xe0\xa6\x3b\x3d\x00\xb1\x6c\x78\x5d\x57\xba\xa1\xd9\x74\x08\x63\x9c\x43\x57\x70\x81\xfd\xf9\x33\x8b\x41\x48\x62\x34\xb3\x16\x0b\x69\x2c\xf4\xa9\xf0\xd1\x2c\xcf\x56\xff\xe0\xcc\x15\x4f\x92\x1b\x65\x6c\xc6\x91\xb9\x8f\x1f\xea\x1b\x99\x07\xca\x39\x80\xaf\xa3\x47\x4a\x0b\xf4\x19\x56\x71\xd3\xa3\x55\xe6\xc6\xe8\x4c\x42\x46\xee\xe5\x65\x2c\x5d\x14\x0e\x7f\x07\x30\x52\x0a\xf1\xc8\x5c\x58\x51\x97\x52\x09\xf2\x53\x18\x20\xbc\x7d\x98\x9f\x86\x76\xe3\x24\xf0\x7e\x9a\x7d\xa8\x73\x71\xd1\xcc\x0a\x69\x96\x97\xa3\xa1\x87\xde\x6c\x11\x4a\xdd\x51\xbe\x59\x45\x31\x40\x14\x91\xb3\x0c\x1e\x85\x64\x84\x32\x12\xa2\xb8\xce\x2a\x75\x5e\x22\xb5\x32\xc3\x6a\x38\x7e\x61\x96\x7d\x0d\xaa\x81\xf3\xb6\x10\x96\x76\x60\x82\xb8\xde\xba\x7a\x02\x7d\x6a\x9a\xc1\x49\xfa\x5e\x55\x03\x59\x19\x2f\x0a\x83\x21\x11\x5a\xd6\x32\x74\x8a\x0d\xb6\x27\x86\x16\x3c\x21\x3f\x49\x24\x6a\x04\xe9\x94\x45\xd8\x75\x28\xab\xc1\xb7\x9d\xac\x7a\x36\x0e\x51\x5a\xdb\xd9\x52\x23\xc1\xb6\x62\x5a\x05\xc1\x47\x8c\x17\x45\x0a\x53\x40\x7b\xea\xb1\xc5\x1a\x2a\xb6\x47\x68\x42\xbb\x83\xb5\xed\x60\x6d\xbd\xf1\x34\xc0\xc9\x97\x3e\xb4\x8c\xcf\x03\xcf\x27\x64\x0e\x29\x04\x54\x35\x35\x6f\xf9\x98\xbb\x88\x33\x6d\xcd\xc7\xe7\x6a\x19\x83\xbb\xf6\xc4\xfa\x3e\xc7\xe9\xf0\x89\x1f\xd6\x04\x82\xdf\x49\x3c\xf7\xf8\x40\x4e\x7c\xaa\x20\xeb\x69\x1b\x9a\x17\xcb\x7c\xcb\xee\xa1\x1d\xd8\x3d\xc5\xb4\x2d\x0d\x3e\x8b\x73\x57\xbc\xa0\x7d\xc3\x72\x9d\x35\xa5\x50\x96\x6e\x31\xb8\xe3\xd6\xd5\x69\x20\xc5\xdc\x7f\x66\x9f\xf1\x5c\xdf\xaa\x5b\x5e\xe7\x27\x17\xd1\x44\x99\x43\x27\xb8\x93\x93\xd2\x4a\x26\x4c\x87\x39\x39\x7c\xa6\x1b\x1b\x68\xb2\xfa\xb8\xcb\x68\xb1\x03\x9c\xe6\x0e\x77\xb9\xc3\x5d\xee\x70\x97\x3b\xdc\xe5\xa7\x8d\x5f\x05\xee\xd2\x49\x41\x9f\xbe\xc0\xae\x7b\x34\x9b\xa1\xaf\x9c\x3d\x36\x60\x4e\x63\xe7\x78\x1a\xe0\xa8\xde\xa3\x85\xb6\x15\x99\x6b\x84\xf5\xd3\x01\xbd\x78\x06\x44\x20\x40\x47\x52\xcd\xfe\x70\xc3\x7a\x91\x13\xff\x22\x12\x25\xd2\x01\xb2\x8f\x0e\xda\x7a\x44\xe8\x15\xac\x1a\x31\x44\x87\xe3\x2e\xb6\x32\x6c\xff\x9b\x02\x55\xf1\xa0\xe7\xb6\xb1\x85\xce\x5f\x30\xb2\x5f\xe6\x45\x2a\xa5\xd1\xbe\x36\x47\xbe\x19\xfe\x11\x3d\xb5\x11\x64\xe6\x20\xc2\x54\x3c\x13\xf0\x62\xb6\x26\x78\x32\x4c\x25\xe1\x50\xb2\xe4\x83\xc9\xe0\x70\xc2\x1e\x5c\x50\x4f\x28\x1b\xe5\x94\xb2\x41\xd8\x21\x45\xca\xba\xeb\x89\x12\xa9\x2f\x47\x37\x42\xbb\xf3\x92\xc3\x1f\x5f\x25\x2f\x1a\x0e\x69\x98\x73\xf3\xad\xc0\x76\x5e\xa2\x2e\x53\x2e\x14\x83\x74\xdc\xd1\x80\x95\x7a\xef\xe6\x39\x31\xe5\x12\xc6\x08\x38\x49\x16\x34\xd1\x45\x12\x24\x8a\xad\x6f\xef\x45\x87\x67\x4a\x5c\x36\xd4\x44\x56\x33\x03\x6a\x2d\xa5\xb9\x5a\x37\x3a\xe0\x3c\x58\x1e\x78\x16\x1f\x7d\x33\x76\xa0\x55\xf2\xd8\x81\x56\x77\xa0\xd5\x1d\x68\xf5\x23\xe3\x2e\x63\x31\xf1\xa9\x94\xc6\x93\x5b\x85\x02\x1a\x08\x5e\x24\xda\x75\x6b\x38\xd8\x99\x
08\x51\xa1\x14\x0d\x0d\xa1\xa0\x00\x82\x0d\x38\x56\x4d\xdf\xd2\x36\xf5\x8c\xb5\x3e\xfb\xd3\xe9\x3e\x56\xfb\x24\x4f\xd2\x69\x07\x3b\x9f\xfc\x89\x09\x95\xe9\x1c\xd5\x44\xca\x72\xce\x65\x6d\x2c\xb8\xb8\x5d\xce\x6d\x1c\xc0\x72\x19\xd6\xb4\x83\xd9\x26\x88\xc4\x35\x4c\xbd\xaa\x49\x2f\x70\x68\xbf\xf7\x6a\x64\xe7\xcc\xbb\x64\x90\xe3\xc2\xdf\x90\x7e\x3d\xda\x36\xf8\xe8\x9b\xb5\x92\x53\xd4\x7d\x21\x4b\x09\x2d\x2e\x72\xf0\x60\x84\xb1\x86\x1d\xe0\x3f\x4e\xb3\xaa\x49\x53\xfa\x20\xa5\x14\xa5\xae\x57\x47\xad\x78\x27\x36\xd1\x05\x6c\x45\xa1\xec\x43\xe7\x09\xa6\x5c\xeb\xa6\xae\x85\xb2\xc5\xea\xd7\xe5\x4d\x26\x1f\xbc\x71\x9c\xc9\xf6\xd4\x52\xdb\x0c\x74\x63\x8d\x0e\x30\x08\x4e\x74\x0d\x00\xe5\xd0\xae\x38\x24\xf6\x3c\x81\x62\xca\xe9\x67\x3d\x20\x37\xc8\x14\xea\x86\xdd\xf0\xda\xd0\x4f\x16\x1b\xcf\x03\xcc\xe5\x8d\x34\x9a\x04\x5c\xed\x09\xd9\x9a\xd6\x1f\xc1\xbb\xd7\x8d\xad\x1a\xeb\x6d\xa8\x71\x02\x06\xe2\x7d\xa5\x8d\xc8\x3b\x9d\x99\xba\xb7\x03\xc7\xfe\x39\xad\xcd\x5a\x18\x15\xb7\x56\xd4\xea\x05\xfb\xef\x83\x7f\x7c\xf1\x61\x72\xf8\xf5\xc1\xc1\x4f\xcf\x26\x7f\xfe\xf9\x8b\x83\x7f\x4c\xe1\x0f\xbf\x3b\xfc\xfa\xf0\x43\xf8\xcb\x17\x87\x87\x07\x07\x3f\xfd\xfd\xf5\xb7\x57\x17\x67\x3f\xcb\xc3\x0f\x3f\xa9\xa6\xbc\xc6\xbf\x7d\x38\xf8\x49\x9c\xfd\xfc\x89\x42\x0e\x0f\xbf\xfe\x6d\xd2\xb4\xb9\x5a\xbd\x4d\x78\x30\x19\x68\xa9\x71\xcc\xed\xbe\xac\xe4\xcb\xc1\xd8\xfb\x49\x87\x27\x9a\x48\x65\x27\xba\x9e\xa0\xd8\x17\xcc\xd6\x44\x88\x2b\x8e\x70\x00\xc7\xd4\x84\xef\x92\x5f\x8b\xe1\xdc\xba\xe0\xcb\x23\x29\x2b\xc0\x0e\x9e\xca\x44\xbe\xd8\x33\x2f\x25\x8d\xb6\xc9\x8a\xb2\xd2\x35\xaf\x57\x2c\xf7\x19\xdf\xd5\x08\x5d\x5d\x7a\x6d\x5d\x92\x5b\x35\xc3\x6a\xe5\xb2\x7e\x40\xca\xd8\xa4\x7e\x2d\x22\x97\x4d\x99\x0e\x45\xf8\xd1\x6d\x02\xc0\xe3\xf5\x3c\xa9\x11\x23\x4e\x28\x10\x72\xcd\x78\x76\x8d\x41\xaf\x76\xbf\x69\x0e\xde\xd5\x5a\x27\xf6\x3d\x8f\x44\x2f\x05\xa7\xe6\xcd\x31\x28\x07\x35\x22\x3a\x17\xee\x10\x85\x5f\x80\xdf\x90\x88\x99\x50\x1e\xb4\xeb\xa9\x1e\x0e\xbc\xf0\x43\x5a\xf6\xbc\x66\xaf\xc1\x3c\x7f\xb4\xf3\xcd\x92\x3b\x13\xc8\x7f\x8b\xef\x9c\x17\x93\x7e\x58\xaf\xb4\xe5\x05\xe3\xbe\x41\xd4\x9c\x15\x3a\xe3\xb4\x80\x62\xa8\x14\x1b\x18\xac\x70\x5e\x83\xbe\x23\x89\x0d\x35\x3e\xee\xd4\xba\xef\x46\xf7\x0d\x32\xfe\x85\x21\xf2\x87\x56\x55\x21\x33\x3e\x2b\x04\xcc\x11\x9d\xb5\xa4\x93\xea\x26\x57\xf2\xf7\xb2\x6c\x4a\xd6\x18\xb7\x0a\x5a\x0d\xe5\x92\xc4\xb6\x0f\xc5\x2d\x6a\x00\xbc\x62\xa5\x54\xee\x17\xd1\x56\x73\x00\x3e\x72\xd2\x2e\xc3\x59\xea\xc2\xd4\x24\xc9\xc0\x9f\x1b\x22\xa8\xa6\x81\x08\x8f\x5f\x01\x74\xb8\x69\x97\x75\x0e\x98\xf7\xd6\x57\x33\x0c\xf8\x59\x2a\x9d\x10\x8b\xea\xe9\x3e\x25\x8b\xbe\xf2\xa3\xe3\x43\x91\x0a\xca\x1f\xcc\x46\xf9\x32\xd2\xbe\x86\xa1\x2d\xaa\xb5\xd5\x76\xa5\xd4\x18\x51\x4f\x16\x8d\xcc\xc7\x51\x47\x9f\x95\xa5\x4f\xb6\xef\xd3\xad\xfa\x64\x5b\x7e\x54\x0b\x7e\x9e\x25\x19\xa2\xaf\x5e\x0e\x7b\x16\xbc\x92\xb3\x5a\xb0\x97\x4b\xae\x94\x88\x7f\x03\x3a\x23\xdd\xd9\x3f\x4e\x3f\x87\x26\x06\x40\xb0\xe4\x5b\x17\xec\xa7\xf5\x2e\x08\x1a\x46\xb5\x8e\x33\xc2\xb1\x29\xb6\xee\x03\x56\x0e\xef\x1a\x0b\x7c\x7c\xec\xfa\xda\x0f\x5b\x0a\x60\xd9\xce\x52\xdf\xb2\x5c\xb3\x5b\xda\x9a\x56\xb5\xb8\x11\xca\x22\xd7\x9d\x09\x05\xc8\xdd\x6e\x91\x84\xce\x6b\x5d\x42\xdd\x61\xad\x4b\x69\x02\x62\xcf\x5f\xce\x07\xb7\x83\x8b\x86\x04\x04\xba\x0b\x9f\xf0\xea\x25\xb3\xbc\x5e\x10\x8b\xeb\x8a\x46\x31\xd5\x94\x33\x91\xe0\x0f\x3c\x16\x16\x77\xd7\xe1\x61\xcc\x0e\x0f\x0f\xdf\xa4\x01\x8f\xed\x8f\x3f\xbe\x19\xa1\xd9\xec\x58\xf7\xe1\x56\xd7\x45\x7e\x2b\x73\xe4\x8d\x30\xec\xc0\x4d\xef\xf0\x3f\xa7\xcb\xeb\xed\xad\xcc\xc7\xde\x0e\x52\x79\x8f\x9f\x8e\xdb\x0e\x06\xfb\xe1\x99\xf7\xa5\x73\xa7\x0e\
x60\x9a\xb4\x48\xca\x99\x04\x80\x0b\x48\x60\xba\x76\xef\xc2\x4c\x2a\x6e\x13\xd0\x94\xdd\x41\x06\x4b\xcf\xe9\xd4\x00\xd1\x32\xc2\xd2\x72\x23\xb3\xc6\xc3\x2d\xb4\x5d\x32\x23\xcb\xa6\xb0\x5c\x09\xdd\x98\x62\x95\x70\x55\x3f\xa7\xe3\x38\x2f\xc4\x7b\xd4\x50\x69\xfe\x42\x2b\x26\x2d\x72\xbd\x10\x4a\xd4\x32\x0b\x70\xb6\x0d\xc7\x01\x18\x3c\x8c\xd4\x4a\xe4\xc7\xc1\x89\x88\xfe\x25\x0d\x58\x27\xc0\x39\x24\x32\x36\xe3\xce\x57\xa8\x8a\x66\x21\xa3\xd1\x95\x3b\xc6\xa0\x5f\xf8\xc9\xfb\x63\x0c\xea\xb8\x2e\x1a\x23\xda\x80\x62\x4a\x1c\xf1\xa1\x4d\xd4\x27\x49\xbc\xf3\x1f\xeb\xbc\xf5\x53\x1f\xbd\x4f\xcf\x45\x25\x54\x4e\x8c\x50\xaa\xbe\x5e\xc4\x2d\x7b\xf0\x53\xe6\xe1\xb5\x63\x5a\x1b\x67\xef\x6d\xcd\xdd\x93\x5e\x52\x0b\x54\xfd\xa4\x9c\x73\xcb\x55\xca\x53\xfb\x39\x17\xa1\xb3\x9d\x77\xf5\xd9\x7b\x57\x9f\x49\x23\xf0\xcf\x83\xc2\x0a\x6d\x30\xaf\x28\x89\x14\x34\x58\xac\x00\x14\x50\x5b\xe8\x9c\x68\xca\x0a\x39\x9b\xfa\xad\x2c\xa6\x43\x5a\xa7\x14\xb1\x81\x0a\x6a\x48\xeb\x44\x7b\x19\x37\xa8\xa0\xfa\x64\x4e\xd4\x1d\xda\xb2\x33\xbb\x76\x87\x3b\x5e\xa8\xd8\xf1\x44\x78\xa1\xe6\x85\xce\xae\x13\xbb\x7a\xbf\x42\x19\x6b\x19\x32\xfc\xc7\xe8\xef\x59\x6f\xeb\x3d\xcc\x88\x81\xc1\x1c\x0f\x57\x0a\xdd\xbc\x41\x1b\x7a\x13\x16\x0b\xe3\x04\x79\xa2\x4e\x51\xd5\xda\x69\x96\xfa\x46\x66\x82\xcd\x84\x7b\x0c\xea\x46\xa9\xf8\xe7\xfb\xb1\x68\x65\xb8\xe5\x46\x58\x2a\xaa\x7b\x48\x3a\xd9\x53\x03\x5e\x2e\x19\x1b\x23\x72\xc6\x0d\x2b\x85\xe5\x4e\x12\x9b\xfc\x15\xa9\x65\xa9\xaf\x6a\x37\x23\xf0\x8b\xc3\x71\x25\xb2\x08\x31\xac\x05\xcc\xb4\x32\x32\x17\x7e\xae\xb9\x3b\xfa\x19\xa7\x71\xbe\x24\x59\xcc\xfe\xcb\xbe\xff\x7e\x84\x76\xa5\x4e\xc8\xda\x1e\x26\x58\x1d\x00\xef\x90\xff\x6a\xfa\x51\x54\x40\xa9\x90\x44\x86\x5d\xa3\x9f\x2d\xf2\x32\x2f\x32\x71\xe1\x54\xb5\xb1\x42\xd9\x53\x69\xae\xd3\x30\xad\xdf\xbe\x3c\x1b\x8a\x4b\xa1\xca\xe2\xec\xdb\x97\x67\xcc\x4b\xb9\x03\x55\x10\x9f\x23\xe9\xeb\x5c\xaf\x74\x37\x60\x05\xd1\x52\x87\x30\x04\x04\x26\x25\xc2\x0a\x17\x99\xa8\xda\xa5\xcc\xa5\xb9\x7e\x40\xfc\x2c\x3d\x60\x59\xe5\x6f\xe2\x83\x7d\x4f\x11\x17\x41\xcd\x5c\x74\xf5\xd9\x70\x54\x57\xba\x61\xb7\x5c\x41\x0f\x71\x0c\xbb\xd1\x94\x8e\xac\x5e\xb0\x33\x65\x9a\xda\xcb\x1d\x21\x89\x0f\x9f\xe9\x3c\x9d\x6d\x81\x3b\x62\xa8\xc7\x07\xfb\x7a\x81\x3b\xec\xbb\xfc\x22\x84\xef\x48\x72\x87\x21\xbf\x6d\xd8\x0b\xaa\xc3\xd3\xe2\x35\x36\xb0\x17\xee\xf6\x12\x03\x15\xa3\xdd\x78\xda\x51\x19\x82\x46\xd6\x00\x20\xd4\xd8\xcb\xf0\xbc\x6d\x02\x40\xc8\x97\xe5\xb1\x40\x23\x15\xaf\x2d\x44\x05\x47\x00\x4f\x03\x47\x9e\x17\x97\xc2\x07\x72\x0f\x8a\xe3\x7c\xce\x74\x29\xad\x0d\x44\xf8\x3d\xd4\x6b\x4a\xf2\xc0\xa9\x0a\x3f\x5b\x67\xbe\xf6\x2e\x3a\xcd\x00\x6a\x8b\x1a\xd9\x71\x2e\x6e\x8e\x4d\xce\x9f\x1f\xc1\x12\xe0\x8d\xa4\xc5\x22\xed\x60\x5f\xb8\x61\x7b\xcf\xf7\xa6\xec\x52\x96\xb2\xe0\x75\xb1\x4a\xee\x24\xda\xc9\x76\xa6\x77\x98\x38\x35\x90\xb7\xf7\x6c\x8f\x1d\xe8\x1a\xbe\x3a\xe3\x8a\x15\x82\xdf\x60\xfc\xd4\x3f\x8f\xb4\x45\x80\xc0\xd8\xe1\xe3\x5b\x23\xec\xd1\xd1\x4d\x68\x9a\xa4\x5f\xf7\xef\xd1\xf0\x57\x9d\x53\x48\xda\x98\x8b\xd3\xce\xaa\x95\xca\x99\xba\x53\xf6\xbd\xb7\x21\xbd\x53\x41\x3f\xf6\x6e\x87\x5a\xa9\x4f\x67\xeb\x1f\x3d\x73\xd2\xa6\x22\xda\x3c\x05\x69\x89\xef\xce\x6d\x50\x39\x73\x06\xb9\x8d\x4d\xa2\xef\x07\xe9\x88\x76\x8f\x1b\x4f\xc9\xb9\x2c\xa4\x7d\x27\xaa\xe8\x92\x89\x35\x87\x14\x85\x0c\xe3\x77\x0b\x19\x6f\xdf\xd6\xa2\xd2\x46\x42\x79\x25\xb7\xd0\x29\xb8\xb6\x32\x6b\x0a\x5e\xb3\x5a\x20\x4e\x25\x7e\x93\x4e\xcf\x2e\xde\x9d\xbd\x3c\xb9\x3a\x3b\x7d\xc1\xc2\x4c\x65\x3f\xe2\x12\x2f\xf2\x4a\x77\xc0\x19\xc6\xbb\x92\x19\xa4\x61\xa2\x7f\xfb\x91\x7f\xf8\xb9\xea\xea\x91\xa0\x47\x39\x57\xec\x5c\x49\x4b\xa7\x3b\x00\x73\x27
\x2b\xb4\x12\xc6\x07\xda\x2b\xed\x71\x3b\x0b\x69\x8f\x68\x0e\x39\x4e\xd6\x89\x1b\xce\xd6\xfd\xcb\x05\x14\xb8\x92\x82\xa2\xf0\x85\xd1\x39\x91\x47\xf1\xc4\xbb\x03\xfb\x90\x51\xd7\x50\x93\x9a\xac\xaa\xaf\x00\x02\xd8\x2b\x6a\x06\x3b\x93\xa6\x06\x03\x2b\x50\x20\x48\x1a\xb4\x3f\x27\x89\xc4\x76\xa0\xce\xba\xde\x9f\xee\x07\x27\xba\x90\xde\xca\x4e\x31\xfe\xbb\x0f\x86\x2c\xba\x2f\xf6\xa3\xdc\x5a\x36\xd0\x5a\x53\xc6\xde\xda\xa5\xa8\x6f\xa5\x11\x47\xce\xdb\x4d\xa4\xa3\xeb\xd9\xc1\x30\xd1\x3e\xf9\x54\xfa\x6c\x83\xcb\x69\x9a\x59\xbb\x20\x24\x91\x6d\xa7\xd5\x85\xbc\x11\x8a\x7c\x88\x12\xcd\x97\xf0\x59\xc9\xb7\xe2\x5d\xb7\x42\xdf\xbf\xfb\xee\xe1\x3f\x04\xdf\x95\xe4\xcf\x78\xa9\xcb\x52\x5a\xb6\xe4\x66\x19\x1a\xa3\xd0\x12\x2a\x2d\x6d\x26\xfd\x0d\x4e\x09\x9e\x63\x1b\xbf\x79\xb4\xa2\x5c\xb3\x51\x82\x98\x34\x3c\x6d\x27\xc6\x97\x71\xab\x0e\x86\xd7\x63\x84\x20\x84\xe1\x3f\x85\x11\x42\xf8\x00\xc0\xf4\xfa\x4f\x60\x5a\x7a\x23\xf2\xb8\x5d\xa3\xe3\x77\x67\x27\xa7\xaf\xcf\xa6\x65\xfe\x59\x3c\x9f\x42\xe5\x95\x96\xf1\x69\x0b\x12\x87\x77\xca\x93\xdb\x4e\x34\xdd\x3b\x3a\x0b\xa2\x20\xe1\x89\xe0\x5d\x5a\xd0\xc1\x0b\x42\x27\x19\x0e\x5f\x2e\x2c\x97\x85\xe9\x4e\x29\x31\x64\x5b\xe9\x42\x2f\xb6\x93\x36\x44\x1c\xc1\xdf\x20\x81\xe3\x84\x4f\xdc\xd9\x7e\xf8\xd8\x23\x8d\x39\x73\xb8\x5b\x40\x69\xec\x11\xd6\x69\xab\xda\xc6\xb3\xa0\x89\xfa\x67\xbc\xb0\x4f\x38\x50\xd0\x69\xe7\x10\xe5\xa5\x85\x5f\x67\x02\xd5\xbb\xc8\xd1\xb6\x69\x7b\x94\xb1\x4a\xd4\xa5\x34\xee\x11\x24\x42\x77\x36\xa3\x0e\x63\x10\x17\x3c\xce\x01\xa2\x04\x1c\xdc\x4b\x49\xa1\x1d\x1f\x9e\x9c\xbf\x79\x29\x69\x8f\x79\x55\x8b\x89\x78\x2f\x0d\xc4\x94\x80\x78\x56\xd7\x3d\xdf\x80\x00\x17\x69\x6d\x81\x90\xec\x0e\xa9\x74\x94\x4a\xc0\xfb\xae\x25\xbd\x3b\xff\x38\x60\x24\xa2\x25\x42\x49\x10\x2f\x8a\x15\xd0\x51\x63\x13\x33\x4c\x38\xf1\x05\x2c\x24\x81\x1f\x17\x91\x6d\x55\x2d\x6f\x64\x21\x16\x6e\xba\x4b\xa9\x16\xc6\x33\x49\xd7\x82\xf1\xa2\xd0\xb7\x34\x24\x81\x11\x62\x63\x5d\xdd\xa5\x31\xb6\xc7\x55\x12\x2d\x18\xfc\xa9\x37\x6f\xaf\x98\x12\x38\x5d\x93\x1c\xc3\x75\x13\x24\xf5\x16\x99\x4c\x26\x90\x51\x3c\xf8\x1f\xad\x84\xc9\x8b\x43\xf6\xa3\xf0\xf3\xd2\xac\x16\x4e\x4f\x13\x30\x9e\xb7\x4b\x0d\x39\x8e\xc6\xf8\xb5\xeb\x4e\x36\x28\x37\x82\x91\xaa\xf2\x20\xf5\xd8\x49\x76\xee\x3e\x9a\xc1\x03\xf9\x14\x02\x48\x6e\x58\x87\x8f\xff\x3c\x62\x3f\x0f\x6c\x82\x8e\x68\xcf\x04\x84\x56\x92\xd3\xdf\xf3\x7c\x5a\xf4\x76\xe5\xad\x25\x5a\x7e\xce\xac\xca\x42\xaa\xeb\x23\x26\x6d\xa8\x6e\x70\x5a\xc3\x13\xfe\x28\x5a\xa6\xde\xeb\xcd\x5a\xf0\xe2\x6e\xdb\x8b\x72\xcb\x1f\xdc\xee\xb2\xa3\x40\x70\xae\x56\x15\x56\xf7\x85\x27\x34\x21\x93\x32\x30\x66\xf6\xf6\xc6\x32\x64\x9e\xfc\x6e\x48\x93\x19\x99\x66\xc0\x9c\x5f\xbe\xbc\x3c\x1f\x58\x2f\x8a\xc1\xbf\xc5\xa7\x37\x3e\x0a\xdd\xa3\x58\xc0\x8f\x07\xdd\xbb\xcb\x84\x85\x05\xff\xcc\x62\x1b\xf2\x5f\xb1\xcb\x34\x61\x45\x13\xff\x33\x58\xc6\x7f\xa1\x6b\x1b\x4d\x26\x98\xf2\x1c\x65\x4b\x5e\x9d\x34\x76\x79\x2a\x4d\xa6\x6f\xc4\x08\x61\xd7\xdb\xa5\x00\x2b\xd2\x43\xe5\x98\x24\x5d\x07\x86\x57\x02\xe7\xc4\x5e\xfe\xed\xe4\x82\xf1\xc6\x9d\x59\x2b\x33\x6a\x61\x55\x5a\x49\x5b\x58\xa9\x4b\x61\x46\x89\xeb\x8e\xb7\x4e\x7e\x46\x4f\x62\x95\x76\x10\xd3\x1d\xc4\xf4\x57\x02\x31\x85\xd7\x8a\x76\x3c\x76\xb0\xd2\x4f\x19\x52\x49\x2b\xb9\xd5\xe4\x76\x12\xc3\x3c\x59\x63\xac\x2e\x51\x8d\x02\xca\x02\x44\x93\x16\xe5\x0d\xc0\x34\xcf\xe7\xc3\x19\x0e\x2a\x5d\xe9\x09\x56\x38\x56\xe7\xca\x8a\x7a\xce\x33\xb1\xc6\xae\x43\xbb\x90\x4a\xdc\xfa\xef\x96\xad\xdc\xbf\xa0\x41\xc1\x2a\xb0\x28\xfe\xfa\xe2\x2f\x09\x44\x48\x8a\x97\xe2\xaf\x6d\x8e\xbd\xed\xf2\x9d\x90\x8c\xcc\xb4\x52\x22\xb3\x0f\x9d\x84\x74\x43\xfe\x2b\xfd\xed\xf6\x80\x0b\x5c\xf
…")
+
+func operatorsCoreosCom_clusterserviceversionsYamlBytes() ([]byte, error) {
+	return bindataRead(
+		_operatorsCoreosCom_clusterserviceversionsYaml,
+		"operators.coreos.com_clusterserviceversions.yaml",
+	)
+}
+
+func operatorsCoreosCom_clusterserviceversionsYaml() (*asset, error) {
+	bytes, err := operatorsCoreosCom_clusterserviceversionsYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "operators.coreos.com_clusterserviceversions.yaml", size: 624236, mode: os.FileMode(420), modTime: time.Unix(1586375942, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
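+
+// Every embedded manifest below follows the same generated pattern: a
+// *YamlBytes function gunzips the blob through bindataRead, and an asset
+// constructor pairs the decompressed bytes with the original file's
+// metadata. As a rough sketch (assuming the standard go-bindata compressed
+// template; the real helper is generated near the top of this file),
+// bindataRead amounts to:
+//
+//	func bindataRead(data []byte, name string) ([]byte, error) {
+//		gz, err := gzip.NewReader(bytes.NewReader(data))
+//		if err != nil {
+//			return nil, fmt.Errorf("read %q: %v", name, err)
+//		}
+//		defer gz.Close()
+//		var buf bytes.Buffer
+//		if _, err := io.Copy(&buf, gz); err != nil {
+//			return nil, fmt.Errorf("read %q: %v", name, err)
+//		}
+//		return buf.Bytes(), nil
+//	}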
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x5b\xcd\x72\x23\xb7\x11\xbe\xef\x53\x74\xad\x0f\xb2\xab\x44\x32\xeb\x5c\x52\xbc\x6d\xe4\x38\xa5\xc4\x59\x6f\x49\xf2\x5e\x1c\x1f\x9a\x33\x4d\x12\x11\x06\x18\x03\x18\x6a\x19\x97\xdf\x3d\xd5\x00\xe6\x97\x98\xe1\x50\xbb\xb2\x1d\x5c\x24\x01\x98\x46\x77\xa3\x7f\xbe\x06\x20\x2c\xc5\x07\x32\x56\x68\xb5\x06\x2c\x05\x7d\x74\xa4\xf8\x2f\xbb\x7c\xfc\x8b\x5d\x0a\xbd\x3a\xbc\xd9\x90\xc3\x37\xaf\x1e\x85\xca\xd7\x70\x53\x59\xa7\x8b\x3b\xb2\xba\x32\x19\x7d\x43\x5b\xa1\x84\x13\x5a\xbd\x2a\xc8\x61\x8e\x0e\xd7\xaf\x00\x14\x16\xb4\x06\xa1\xac\x43\x29\x4b\x89\xca\x2e\x75\x49\x06\x9d\x36\x76\x99\x69\x43\x9a\x7f\x14\xaf\x00\x50\x29\xed\x90\x09\x58\xfe\x10\x20\x17\xb6\x94\x78\x7c\xe7\x29\xdc\x06\x0a\xf0\x5e\xa2\x0a\xa3\x64\x33\x23\x4a\xe7\xb9\xbd\xa3\xd2\x90\x25\xe5\x2c\x20\xf0\x2a\xe0\x74\xbd\x28\xa0\xca\xc1\x90\xd5\xf2\x40\x90\x53\x49\x2a\x27\x95\x09\xb2\xb0\xd5\x06\x6e\x64\x65\x1d\x19\x4f\x12\xe0\x9e\xcc\x41\x64\x64\x5f\xd9\x92\x32\xe6\x62\x67\x74\x55\xae\x61\x84\xe5\x43\xad\xad\xc3\x1b\x94\xe5\x1e\xdf\xb4\x7d\x5e\x86\x45\x14\xbf\x33\x0c\x60\xc9\x1c\x28\x5f\x83\x33\x15\x85\x0e\xa7\x0d\xee\xa8\xe9\xb1\x99\x2e\x69\x0d\x2c\xb7\x2d\x31\xa3\x3c\xaa\x31\xaa\xa5\x94\x95\x41\xd9\xd7\x69\xa0\x23\xd4\xae\x92\x68\x7a\x43\x7e\x24\xec\x57\x54\x61\xa3\x41\x29\xac\xfb\xe7\x70\xe4\x3b\x61\x5d\xa0\xb6\xd7\xc6\xbd\x6b\x97\x5d\x80\x28\xfd\x2f\x19\x3a\xda\x69\x23\xda\x01\x2d\xfd\xfe\xe5\xb9\xdf\x7e\x94\xef\x8d\x50\x8e\xcc\x8d\x96\x55\xd1\x57\xc4\xcd\xfd\x07\xff\x8d\x3b\xb2\x84\xd6\x19\xa1\x76\xa7\xbb\xf9\xb0\x27\xd8\x0a\x63\x1d\xcf\x07\xa1\xc0\xed\xc9\x73\x0b\x7a\x0b\x59\xd8\xb0\xb8\x53\xd1\x5e\x3d\x9f\x9e\xd0\x3f\xee\xbf\x7f\xf7\x1e\xdd\x7e\x0d\x4b\xde\xc3\xe5\xf8\xf4\x1f\xff\xf4\x53\x87\xb3\xb7\x65\x69\xf4\x01\xe5\x4c\xf6\x30\x4e\x87\x42\xe7\x94\x5c\x18\x5b\x82\xfd\x35\xfc\x7e\xd6\x6b\x6c\xb4\x96\x14\xf7\x23\x4d\xc1\x4f\xf7\xd6\x6d\x0e\xf4\x83\x7a\x54\xfa\x49\x7d\x2b\x48\xe6\x76\x0d\x5b\x94\xd6\x5b\x4c\xb5\x31\xd1\x0d\xe3\xa6\x7c\x01\xd6\xa1\xab\x2c\x90\xc2\x8d\x24\xeb\x55\x18\xbb\x3a\xb3\x97\xd1\x00\xb9\x7f\x0d\xbf\xfc\xca\xf6\x8b\x52\xe4\xde\x0d\x03\x25\x5d\x92\x7a\xfb\xfe\xf6\xc3\x9f\xef\xb3\x3d\x15\xb8\x8e\x9e\xd2\xd3\x48\xc7\x7e\x20\xe7\x38\x10\x17\x8c\x76\xe8\xa9\xf1\xde\x21\x58\xf2\x9b\xd8\xfa\x53\x24\x17\xb4\xa1\x37\xff\xa1\xcc\xc5\x2e\x43\x3f\x57\xc2\x50\x5e\xaf\xb8\x80\x3a\xae\x34\x1d\xac\xa6\xf8\x47\x69\x98\xa6\x6b\xac\x92\x5b\x27\x9e\x35\x7d\x03\xce\xaf\x58\xb4\x30\xa7\xc7\x79\xf4\x62\xca\xc1\x7a\xb1\x99\x69\xb7\x17\x16\x4c\x1d\x69\xbc\x50\x1d\xb2\xe0\x05\x54\x51\x86\xa5\x0f\x25\x64\x2c\xfb\x51\x25\x73\xc8\xb4\x3a\x90\x71\x60\x28\xd3\x3b\x25\xfe\xdb\x50\xb6\x1c\xaa\xbc\x81\xa3\x23\xeb\x7a\x14\xbd\x1b\x29\x94\xbc\x29\x15\x5d\xfb\x48\x56\xe0\x11\x0c\xf1\x1a\x50\xa9\x0e\x35\x3f\xc5\x2e\xe1\x5f\xda\xb0\xe2\xb7\x7a\x0d\x7b\xe7\x4a\xbb\x5e\xad\x76\xc2\xd5\x11\x3c\xd3\x45\x51\x29\xe1\x8e\xab\x4c\x2b\x67\xc4\xa6\xe2\x5d\x58\xe5\x74\x20\xb9\xb2\x62\xb7\x40\x93\xed\x85\xa3\xcc\x55\x86\x56\x58\x8a\x85\x67\x5c\xf9\xb0\xbc\x2c\xf2\x2f\x1a\x3b\xbb\xea\x70\x7a\xe2\x2f\x4d\xd0\x19\xd5\x3b\x07\x1e\x10\x1c\xae\xc3\x67\x81\xff\x56\xbd\xdc\xc5\x5a\xb9\xfb\xdb\xfd\x03\xd4\x8b\xfa\x2d\xe8\xeb\xdc\x6b\xbb\xfd\xcc\xb6\x8a\x67\x45\x09\xb5\x25\x13\x36\x6e\x6b\x74\xe1\x29\x92\xca\x4b\x2d\x94\xf3\x7f\x64\x52\x90\xea\x2b\xdd\x56\x9b\x42\x38\xeb\xed\x8f\xac\xe3\xfd\x59\xc2\x8d\xcf\x4e\xb0\x21\xa8\xca\x1c\x1d\xe5\x4b\xb8\x55\x70\x83\x05\xc9\x1b\xb4\xf4\xe2\x6a\x67\x0d\xdb\x05\xab\xf4\xbc\xe2\xbb\xe9\xb7\x3f\xb1\xe7\x5f\x00\x75\x92\x4b\xee\x50\xc7\xa7\xef\x4b\xca\x1a\xef\x68\x7c\xf8\x6d\x59\x4a\x9
1\x05\xdf\x6e\xac\x82\x8d\x79\x43\x03\x23\xf6\x84\x62\xd8\x9b\x60\xe7\xd4\xe5\xc1\x7b\x39\x76\x43\x73\xbf\xb3\x47\x74\x71\x2e\x37\xd4\xd3\x76\xa4\x38\xfc\xf4\xfd\x37\x15\x40\xc0\x07\x91\xb0\x7a\xbf\x77\xa0\xac\x3a\x7b\xb0\x45\xb3\x59\x55\x96\x4c\x9b\x23\x4a\x2d\x45\x76\xf4\x70\x03\x55\x57\xb1\xcb\x01\xcd\xe4\x76\xb6\x4c\xf4\x15\x03\xc9\x24\x52\xb7\x71\x55\xa4\x69\xa0\x31\x78\x1c\x8c\x08\x47\xc5\xc9\xf4\x09\x2e\x5b\xbd\xa6\xd7\xe0\x70\xb6\x6b\xb0\x56\x68\xc1\x6e\xd2\xf3\x13\x4b\x84\xe9\x0d\x34\x9a\xf5\x5d\x4c\x6f\x63\x86\xfe\xba\x6b\xe9\x21\x43\xb6\xe1\x24\xa6\xb1\xad\x36\x45\xb0\x74\xdc\xe8\xca\x75\xb2\xe9\x30\x03\x58\x47\xa5\x6d\x0c\x99\xdd\x21\xd3\x45\x29\xc9\xf5\xb3\xe1\x12\xfe\xad\x20\xae\xc6\x81\xca\x19\x14\xd2\x93\xc5\xcc\x55\x3d\x5b\x0f\x02\x50\xcc\x9f\x47\xeb\xa8\x58\xbe\x7e\xae\x2f\x65\xe8\x50\xea\xdd\x7d\xf0\xd6\xde\x50\xb9\x47\x4b\x73\x1c\xc2\x39\x52\x15\xc7\xc0\x68\x5c\x6f\xb3\x4c\x57\xca\xdd\xd1\x76\xda\x47\xc6\xbf\x03\x43\x5b\x32\xa4\xb2\x1a\xa8\x84\x09\x80\x61\xc6\xa9\x05\xee\xd1\xb1\xab\x55\x36\xa8\x38\xd7\x01\x34\xe7\x0d\xb0\xa8\x95\x9d\xf6\xb0\x13\x65\x4d\xc9\x0b\xa3\x48\x22\x2d\xe6\xfb\xdb\x1a\x3d\x04\xd0\x40\xb5\x74\x6e\xc8\x0c\x4c\x59\x3a\xb7\x2d\xe3\x3c\x0f\x09\xcf\xad\x7a\x75\x1b\x95\xe8\x73\xa9\xd3\x5c\x06\x09\xca\xa8\x07\x4a\xbc\x52\x08\xf3\xd0\x99\x20\x09\xc0\x29\xc7\x50\x9c\x7f\x1d\x32\x67\x4c\xd0\x2d\x90\x71\x28\x14\x60\x40\x8a\x1e\xb5\xae\xfe\xae\x03\xaf\x49\x9a\x98\x65\x64\x6d\xb0\xe2\x82\x94\xbb\x06\x5b\x65\x7b\x40\xcb\x22\xb0\x89\xb2\x1f\xd0\xb2\x40\x25\xb6\x64\xdd\x32\xae\x40\xc6\xfe\xf8\xf5\x4f\x29\x9d\x01\x7c\xab\x0d\xd0\x47\x64\xc7\xba\x06\x11\xb4\xdc\x40\x81\x68\x4a\x3e\x16\xb3\x22\x1a\x7a\xf0\x24\xdc\x5e\xa4\x05\x47\x28\x75\x1e\x05\x7e\xf2\x82\x3a\x7c\x24\xd0\x51\xd0\x8a\x0b\x90\x47\x5a\xc3\xeb\x50\x55\x34\x2c\xfe\xc2\xd0\xfe\xd7\xd7\x49\x9a\x5f\x3e\xed\xc9\x10\xbc\xe6\x29\xaf\x03\x63\x0d\xda\xe3\xbe\xda\x3e\x5a\x06\xbd\x61\x3b\x23\x76\x3b\x32\x94\xd6\xa6\x87\x30\x0c\x0d\xbe\x02\x36\xf3\x2d\x28\xdd\x21\xe0\xc9\xf2\x9e\x95\x94\x89\xad\xa0\xfc\x84\xe1\x1f\xbf\xfe\x69\x84\xdb\xbe\x9e\x40\xa8\x9c\x3e\xc2\xd7\xa1\xfe\x12\x96\xf5\xf3\xd5\x12\x1e\xbc\x45\x1c\x95\xc3\x8f\xbc\x4e\xb6\xd7\x96\x14\x68\x25\x87\xc9\x23\x72\xab\x61\x8f\x07\x02\xab\x0b\x82\x27\x92\x72\x11\x70\x44\x0e\x4f\x78\x64\xf9\xeb\xed\x62\x0b\x43\x28\xd1\xb8\x3e\x8e\x4e\x52\x7d\xf8\xfe\x9b\xef\xd7\x81\x2b\x36\xa1\x9d\x62\x56\x18\x9f\x6d\x05\xa3\x65\x86\xc9\x01\xf3\xb1\x4d\x7a\x75\x54\xc1\x38\x38\x1c\xef\x51\xed\x28\x49\x36\x56\x9a\xdb\x8a\x51\xd8\xf2\xea\x52\x6f\x1d\x02\xde\xba\x25\x80\xef\x30\x30\xfc\x4e\xf0\x71\x96\x58\xbe\x76\x3d\x2b\xd6\xbb\x8e\x3d\x4f\x8a\xf5\x58\x6d\xc8\x28\x72\xe4\x25\xcb\x75\x66\x59\xa8\x8c\x4a\x67\x57\xfa\xc0\x81\x9f\x9e\x56\x4f\xda\x3c\x0a\xb5\x5b\xb0\x21\x2e\x82\x25\xd8\x95\x3f\x03\x59\x7d\xe1\x7f\x3c\x4b\x8a\x24\x64\x48\x8b\xe2\xa7\xfe\x16\xf2\xf0\x3a\x76\x75\xb1\x38\x35\xe0\x9e\x9b\x95\xae\xee\x43\x40\xc8\x86\x5f\xb2\x4b\x3c\xed\x45\xb6\xaf\xcb\xdb\x36\x7a\x26\x7d\xa4\xc0\x3c\x84\x5c\x54\xc7\x17\x37\x5b\x56\x64\x65\x98\x9f\xe3\xc2\x93\xd0\x72\x81\x2a\xe7\xdf\xad\xb0\x8e\xfb\x2f\xd6\x5c\x25\x66\x38\xe9\x0f\xb7\xdf\xfc\x36\xc6\x5c\x89\x0b\x3d\x72\x53\xa9\x5c\xd2\x77\x5a\x3f\x56\xe5\x09\x50\xe9\x09\xf1\xd7\xee\xcc\xba\x30\x89\xb5\x9b\x50\x8b\xd2\xe8\x9d\xe1\xbc\xdc\xa9\x73\xa1\xac\xa4\x3c\x05\x3f\x2a\x87\x4a\x95\x98\x3d\xe2\x8e\x22\x03\x3e\xe5\x90\x72\x75\x3a\x8b\xe5\x46\x1a\x6e\x5d\x50\x5b\x8c\x4a\x10\xce\x0a\x22\xaf\x35\xab\x03\xd6\xea\x5c\x3a\xac\xe8\x43\xf3\xf8\x39\x72\x3f\xcd\xf5\x24\x4c\x4c\x23\xeb\xd0\x06\xf8\xfa\x8e\xb6\x89\x29\x22\x67\x0b\xdf\x8a\x41\x21\x14\x
06\x4b\x74\xfb\x44\xb7\xa1\x52\x62\x1f\xb0\x87\x36\x05\x5a\xe1\x84\x9b\xd4\x9c\x81\xce\x6f\x06\x9f\xd4\x7a\xaf\xc3\x42\xd4\x5c\x6f\x5a\x92\x6a\x40\x2a\x51\xdf\x2c\x16\x3c\xa1\xad\xcf\xe1\x73\x7f\x1c\x93\x4e\xee\x93\xba\x3f\x2f\x33\x9c\x85\xeb\x09\xa9\x2f\x04\xed\x5d\x46\x47\xc2\x4c\x68\x93\x00\x3e\xc1\xc7\x45\x30\x7e\x94\x64\x7d\x14\x79\x31\x98\x9f\xa0\xd8\x83\xf9\x2f\x01\xe9\x43\xfb\xfc\xc0\x3e\xb4\x97\x80\xf7\xa1\xbd\x08\xc8\x0f\xed\xc5\xa0\x7e\xe4\xfc\xf9\x80\x7f\xca\xf4\x94\x3c\x3e\x07\xf6\x9f\xb5\xe6\xfa\x60\xfd\x22\xf0\x3f\x41\x35\x55\x16\xcc\x28\x01\xe2\xd6\x9c\xf7\xfd\xb1\x72\x20\xb4\x3f\x7e\x51\x30\x5b\xd0\xb1\x02\x21\x25\xe8\x1f\xa0\x4c\xb8\x48\xae\xd1\x92\x61\x4c\xb8\x3f\x40\xe1\x30\x5b\xc0\x19\x45\x44\x4a\xcc\x4b\x4b\x89\x69\x1f\xfc\x7f\x28\x28\x66\x6b\x74\xa4\xb8\x48\x69\xf1\x77\x2f\x31\x66\x09\x95\x69\x15\x6e\xf5\x47\xb0\x56\x1f\x3b\x36\x93\x87\x27\xe8\xcc\x28\x4a\xd9\x3d\xc9\xee\x82\xfb\x29\x20\x98\x2a\x1e\x42\x1b\x29\x21\xba\x1f\x4f\xa0\xc8\x29\x1c\x1f\xda\x22\x75\xc4\xdf\x1d\xe6\x35\x46\x06\xcf\x63\x54\x00\x89\xd6\x3d\x18\x54\xd6\xeb\xec\x41\x4c\x45\xd1\x81\xa6\xbf\x43\x2e\x82\x44\xd1\xd4\x3c\x41\xef\xe0\x1a\x72\x11\x62\x83\x56\x53\xfe\x57\x5f\x8e\x78\x3c\xa5\xb4\xdb\xa7\x8a\xa1\xb6\xcd\xf0\x00\x6e\xe1\xb6\x64\x0d\x39\x3a\x5a\x30\x97\x93\x2a\xf8\xc1\x5f\xa8\x7e\x16\xf1\xb9\xc2\x28\x8d\xde\x50\xfe\x9b\x4a\x51\x90\xb5\xb8\x9b\xcf\xfe\x5b\xd8\x57\x05\x2a\x30\x84\x39\x6e\x24\xd5\x04\x18\x81\xf9\x1b\x55\xb5\x83\x9c\x1c\x0a\x39\x66\x7b\xa1\xb5\x77\x51\xed\xbe\x7f\xb2\xe0\x86\xd0\x4e\xe5\x82\xc4\x63\x98\xf0\x89\xbf\xdf\xec\xed\xc7\x95\xf5\x1b\xfc\x39\xb9\x3b\xbd\xcb\x9b\xe4\x2e\xde\xb0\xb5\x10\x3c\x30\x76\xcd\x6e\xc1\xbd\x0f\xa6\xa2\x6b\xf8\x16\xa5\xa5\xeb\x49\x55\xc7\x87\x37\x9f\xcc\xbf\x9f\x34\x5b\xb7\xc7\xd2\x73\xd9\xf0\xfd\x09\xcb\xb7\xa7\x0e\x33\x02\xf9\x6d\x33\xb9\x3e\x37\x8a\x27\x09\x8b\x4a\x89\x9f\xab\x7e\x61\x53\x5f\xb9\x8d\xf0\xf6\xe5\xb0\x10\xba\xb9\xff\xe0\x4d\x25\x1c\x0d\xd8\x50\x0e\xd5\x85\xe8\xcd\xfd\x07\xfb\xd5\x44\x36\x18\x95\xaf\x1c\x2d\xb2\x7b\x92\x71\x2d\x3e\x28\xd2\xa4\xce\x3a\x0f\x94\xda\x03\xa2\xb2\x3a\xbd\x3e\xac\xdb\xad\xbb\xb2\xcc\x91\xc8\x50\xca\x23\x57\x27\xa2\x60\x07\x6e\x30\xcf\x54\x46\x1b\x95\xa1\x3e\xe5\x99\x21\xc7\x5d\x9c\x5a\xef\x4f\x57\xc1\xad\x08\x91\xa0\x2f\xef\x26\x0e\x69\xd8\x19\xb6\xba\x52\x39\xa0\xf3\x1a\xba\x90\xfb\xfe\xa5\xf2\xcb\xbd\x2f\x18\x07\x23\x9f\x76\xce\xd8\x79\x00\xd0\x40\x98\x29\x04\xe3\x23\x4a\xea\xd0\x96\xeb\xf5\x8f\x94\x55\x8d\x39\x4d\xbe\xf6\x38\x03\x52\xa6\x21\xc4\x5c\xf0\x70\x36\x2a\xcd\x49\x73\x73\xd2\xf4\x67\x59\x68\x32\x93\x9e\xf5\x9e\xf1\xc4\x95\x86\xa9\x77\x21\x6f\xf9\x13\xce\x0c\x0b\x92\x19\x5a\xca\x87\xe9\xcc\x63\xd6\x31\xef\x39\x93\xd9\xce\xb0\x3c\x95\xcd\xce\x7c\x3a\x9e\x48\xce\x9a\xb6\x4f\x2b\x61\xd6\xa6\xf3\x32\x94\x26\x9e\x05\x40\xef\x89\x27\x3a\x56\x18\x19\x1f\xb1\xc3\x5b\x3a\x64\x1d\x3e\xed\xb5\xbc\x34\xf2\xf9\x57\x27\x93\xf7\x18\x9d\x95\xdf\xf3\xe4\x26\x25\xf9\x42\xce\xb5\x1e\x09\x78\xf2\xc4\xb9\xdb\xc6\x59\x1c\x67\x4e\xe2\xc8\x53\xa6\xe7\x86\x9a\x7b\x47\xe5\x30\xb4\x74\x04\x50\x1e\x02\x1e\x44\x5e\xa1\xf4\x4f\x89\x40\xa8\xf4\x9e\x3c\x3f\xae\x4c\x5d\x5f\x84\xe3\xf9\x94\xc5\x2d\x9a\x6a\x3f\x31\x34\x52\x27\x4d\x47\xb0\x66\xad\x67\x7a\x7b\xfa\xe5\x58\x68\x27\x3a\xaf\xff\x4b\x61\x42\xf7\xe3\x02\x46\x86\x34\x6c\x3c\xd2\xce\x1e\x29\x87\xcd\xf1\x6c\x74\x87\x39\x65\xe8\x74\x11\xba\x08\xff\x80\x30\x32\xf6\x28\x54\xfa\x80\x33\x3c\x38\x1f\x19\x6a\x9f\xd0\x9d\x9d\xe0\x0f\x99\x46\x66\xc5\xeb\x92\xe4\xe8\xf9\xea\x37\xfc\x5f\xc5\xcb\x1d\xaf\xce\x20\x50\x5f\x4e\x7c\x12\x91\xe9\xb3\xcf\x19\x04\x5a\x65\x7f\x26\x32\x67\x4e\x2c\
x67\xd0\x3a\x9c\x3b\x0e\xfc\x84\xac\x76\xe2\x98\xb1\x42\x9b\x88\xe9\x25\x1a\x27\xb2\x4a\xe2\x58\x85\xd1\xbc\x0e\xf7\xe1\x73\x32\x07\x9c\x70\xff\xbf\x00\x00\x00\xff\xff\x6f\x96\xa7\xeb\xf3\x34\x00\x00") + +func operatorsCoreosCom_installplansYamlBytes() ([]byte, error) { + return bindataRead( + _operatorsCoreosCom_installplansYaml, + "operators.coreos.com_installplans.yaml", + ) +} + +func operatorsCoreosCom_installplansYaml() (*asset, error) { + bytes, err := operatorsCoreosCom_installplansYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "operators.coreos.com_installplans.yaml", size: 13555, mode: os.FileMode(420), modTime: time.Unix(1586375941, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _operatorsCoreosCom_operatorgroupsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x59\xcd\x72\xdb\x38\x12\xbe\xfb\x29\xba\x34\x07\x4f\xaa\x2c\xa9\x92\xbd\x6c\xe9\xe6\x4a\x32\x53\xda\xc9\x38\x2e\xdb\xc9\x65\x6a\x0e\x2d\xb2\x25\xf6\x0a\x04\xb8\x00\x28\x59\x9b\xca\xbb\x6f\x35\x40\x52\x24\x45\xca\x4a\x76\x33\x8b\x8b\x2d\xfc\x34\xfa\xf7\xeb\x6e\x10\x0b\xfe\x4c\xd6\xb1\xd1\x0b\xc0\x82\xe9\xd9\x93\x96\x5f\x6e\xb6\xfd\xbb\x9b\xb1\x99\xef\x5e\xaf\xc8\xe3\xeb\xab\x2d\xeb\x74\x01\x6f\x4b\xe7\x4d\xfe\x40\xce\x94\x36\xa1\x77\xb4\x66\xcd\x9e\x8d\xbe\xca\xc9\x63\x8a\x1e\x17\x57\x00\x1a\x73\x5a\x80\x29\xc8\xa2\x37\x76\x63\x4d\x59\xb8\x59\xfd\xd3\xcd\x12\x63\xc9\xc8\x9f\xfc\xca\x15\x94\xc8\x89\xb0\xe7\x78\xa4\xb3\x07\x60\x57\x33\xb8\x7b\x7d\xfc\xe5\xe4\xdc\xb4\xba\x2b\x2c\x00\x38\xb2\x3b\x4a\x17\xe0\x6d\x49\x71\xc2\x1b\x8b\x1b\x6a\xcd\x4c\x5c\x92\x51\x8e\x93\x45\xf8\x05\x30\x31\x05\xe9\xdb\xfb\xe5\xe7\xbf\x3d\x76\x17\x00\x52\x72\x89\xe5\xc2\x87\xab\x3f\x56\xac\xfd\x2a\x9c\x02\x3b\xf0\x19\x41\xa9\xd9\x83\x59\x43\x5e\x2a\xcf\x9e\x34\xea\xe4\x00\x6b\x63\xe1\xe3\x87\xdf\x21\x47\x8d\x1b\x4a\x5b\x42\x35\x94\x01\x96\x1e\x12\xa3\x9d\xb7\xc8\x3a\xd2\x62\xed\x3c\x2a\x85\x72\x9d\xd0\x6c\x8e\x01\x6b\x60\xef\x82\xa4\xae\xc0\x84\xc0\x1b\x40\x10\xd5\xf1\x9a\x29\x6d\x51\x75\x14\xd8\xf1\x68\x37\xe4\x8f\x07\x5a\x37\xfb\x43\x21\xa6\x59\xfd\x93\x12\xdf\x4c\x5a\xfa\x57\xc9\x96\xd2\xa3\xe8\x53\xa8\xed\xd9\x4c\x15\x56\x38\xf2\x4c\x6e\xd1\xba\xb1\xe5\x3e\xad\xd9\x9e\xee\xae\x45\xbf\x71\x17\xa4\xe2\x32\x14\x45\xae\x2c\x49\x29\x44\xa3\x04\xde\x33\x76\x60\xa9\xb0\xe4\x48\xfb\xa0\x8d\x0e\x61\x90\x4d\xa8\x2b\x11\x66\xf0\x28\x26\xb7\x0e\x5c\x66\x4a\x95\x8a\x52\x77\x64\x3d\x58\x4a\xcc\x46\xf3\xbf\x1b\xda\x4e\xb4\x26\x97\x2a\xf4\xe4\x7c\x8f\x26\x6b\x4f\x56\xa3\x82\x1d\xaa\x92\x6e\x00\x75\x0a\x39\x1e\xc0\x92\xdc\x02\xa5\x6e\xd1\x0b\x5b\xdc\x0c\x7e\x37\x56\xac\xb6\x36\x0b\xc8\xbc\x2f\xdc\x62\x3e\xdf\xb0\xaf\xc3\x26\x31\x79\x2e\xee\x71\x98\x27\x46\x7b\xcb\xab\x52\x6c\x39\x4f\x69\x47\x6a\xee\x78\x33\x45\x9b\x64\xec\x29\xf1\xa5\xa5\x39\x16\x3c\x0d\xac\x6b\x1f\x62\x2f\x4f\x7f\xb2\x55\x8c\xb9\xeb\x0e\xaf\xd1\x80\xce\x5b\xd6\x9b\xd6\x42\x88\xce\x33\x16\xf8\x8d\x75\x2a\x5e\x8b\xd5\xd1\x28\xc5\x51\xd1\x32\x25\xda\x79\x78\xff\xf8\x04\xf5\xd5\xc1\x18\x7d\xed\x07\xbd\x1f\x0f\xba\xa3\x09\x44\x61\xac\xd7\x64\xa3\x11\xd7\xd6\xe4\x81\x26\xe9\xb4\x30\xac\x7d\xf8\x91\x28\x26\xdd\x57\xbf\x2b\x57\xb9\xb8\xb8\x38\x22\x39\x2f\xb6\x9a\xc1\x5b\xd4\xda\x78\x58\x11\x94\x45\x8a\x9e\xd2\x19\x2c\x35\xbc\xc5\x9c\xd4\x5b\x74\xf4\xc3\x0d\x20\x9a\x76\x53\x51\xec\x65\x26\x68\x23\x60\x7f\x73\x2f\xe0\x00\x6a\xe0\x1b\xb1\x57\x07\x6d\x1e\x0b\x4a\x6a\xc4\x91\x73\x01\x61\x50\xf7\x20\xa9\x36\xda\xec\x92\xeb\xc7\xc2\x39\xb0\x46\x8a\x12\x6f\x6c\x7f\xbe\xc7\xe2\x63\xb5\xad\xda\x1f\xd9\xeb\xb0\x74\xed\xce\x21\xd1\x8b\x1c\x9e\
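+
+// Callers don't invoke these constructors directly; they go through the
+// generated Asset accessor, which looks the requested name up in a table of
+// these functions (the accessor and table are generated later in this
+// file). A hypothetical caller, for illustration:
+//
+//	data, err := Asset("operators.coreos.com_installplans.yaml")
+//	if err != nil {
+//		log.Fatalf("missing embedded CRD: %v", err)
+//	}
+//	// data now holds the decompressed InstallPlan CRD manifest,
+//	// ready for YAML decoding.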
+var _operatorsCoreosCom_operatorgroupsYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff…")
+
+func operatorsCoreosCom_operatorgroupsYamlBytes() ([]byte, error) {
+	return bindataRead(
+		_operatorsCoreosCom_operatorgroupsYaml,
+		"operators.coreos.com_operatorgroups.yaml",
+	)
+}
+
+func operatorsCoreosCom_operatorgroupsYaml() (*asset, error) {
+	bytes, err := operatorsCoreosCom_operatorgroupsYamlBytes()
+	if err != nil {
+		return nil, err
+	}
+
+	info := bindataFileInfo{name: "operators.coreos.com_operatorgroups.yaml", size: 7943, mode: os.FileMode(420), modTime: time.Unix(1586375941, 0)}
+	a := &asset{bytes: bytes, info: info}
+	return a, nil
+}
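+
+// In each bindataFileInfo, size is the uncompressed byte count and modTime
+// the generation timestamp, which lets the embedded asset masquerade as an
+// os.FileInfo. For example (AssetInfo is part of go-bindata's standard
+// generated API; this is an illustrative sketch, not code from this diff):
+//
+//	info, err := AssetInfo("operators.coreos.com_operatorgroups.yaml")
+//	if err == nil {
+//		fmt.Println(info.Name(), info.Size()) // operators.coreos.com_operatorgroups.yaml 7943
+//	}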
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x5a\x4b\x73\xdb\x38\xf2\xbf\xfb\x53\x74\x69\x0e\xfe\x4f\x95\x1e\xff\x64\x2f\x53\xba\xb9\x9c\xcc\x96\x77\x67\x9d\x54\xec\xe4\x92\xca\xa1\x45\xb6\x44\xac\x40\x80\x83\x06\x25\x6b\xa7\xe6\xbb\x6f\x35\x40\x52\xa4\x2c\x92\x72\xc5\x99\xc5\xc5\x16\x08\xf4\xe3\xd7\x4f\x80\xc4\x42\x7d\x21\xc7\xca\x9a\x25\x60\xa1\xe8\xc9\x93\x91\x5f\x3c\xdf\xfe\xc2\x73\x65\x17\xbb\x37\x2b\xf2\xf8\xe6\x6a\xab\x4c\xba\x84\xdb\x92\xbd\xcd\x3f\x11\xdb\xd2\x25\xf4\x8e\xd6\xca\x28\xaf\xac\xb9\xca\xc9\x63\x8a\x1e\x97\x57\x00\x68\x8c\xf5\x28\xd3\x2c\x3f\x01\x12\x6b\xbc\xb3\x5a\x93\x9b\x6d\xc8\xcc\xb7\xe5\x8a\x56\xa5\xd2\x29\xb9\xc0\xa1\xe6\xbf\xfb\xff\xf9\xdb\xf9\x2f\x57\x00\x89\xa3\xb0\xfd\x51\xe5\xc4\x1e\xf3\x62\x09\xa6\xd4\xfa\x0a\xc0\x60\x4e\x4b\xb0\x05\x39\xf4\xd6\xf1\xfc\xf8\x5f\x62\x1d\x59\xf9\x93\x5f\x71\x41\x89\x30\xde\x38\x5b\x16\xed\xd5\xad\x35\x91\x54\x25\x5f\xd4\xed\x43\xb5\x2e\x4c\x69\xc5\xfe\x9f\x9d\xe9\xdf\x14\xfb\xf0\xa8\xd0\xa5\x43\xdd\xa2\x1b\x66\x59\x99\x4d\xa9\xd1\x1d\xe7\xaf\x00\x38\xb1\x05\x2d\xe1\x56\x97\xec\x49\x26\x76\xa8\x55\x1a\x74\x8b\x9c\x6d\x41\xe6\xe6\xe3\xdd\x97\xbf\x3d\x24\x19\xe5\x18\x27\x01\x52\xe2\xc4\xa9\x22\xac\x6b\x04\x00\x47\x85\x23\x26\xe3\x19\x10\x92\x48\xb3\xe1\x36\xaf\xb6\xfa\x83\x70\xb4\xab\x7f\x53\xe2\xab\xa9\xc2\xc9\x22\xaf\x6a\x75\x65\xb4\xec\xde\xcc\x9d\xf0\xbd\x16\xc1\xe2\x1a\x48\xc5\xd2\xc4\xe0\x33\x82\xca\x5e\x94\x02\x07\xa1\xc1\xae\xc1\x67\x8a\x8f\xf2\x05\x05\x5b\x64\x41\x96\xa0\xa9\xa4\x9a\xc3\x03\x39\x21\x02\x9c\xd9\x52\xa7\xe2\x1e\x3b\x72\x1e\x1c\x25\x76\x63\xd4\x7f\x1a\xca\x0c\xde\x06\x96\x1a\x3d\xb1\xef\x50\x54\xc6\x93\x33\xa8\x05\xd2\x92\xa6\x80\x26\x85\x1c\x0f\xe0\x48\x78\x40\x69\x5a\xd4\xc2\x12\x9e\xc3\xbf\xac\x23\x50\x66\x6d\x97\x90\x79\x5f\xf0\x72\xb1\xd8\x28\x5f\x7b\x7a\x62\xf3\xbc\x34\xca\x1f\x16\xc1\x5f\xd5\xaa\x14\xe3\x2e\x52\xda\x91\x5e\xb0\xda\xcc\xd0\x25\x99\xf2\x94\xf8\xd2\xd1\x02\x0b\x35\x0b\x82\x9b\xe0\xe8\xf3\x3c\xfd\xc9\x55\x61\xc1\xd7\x2d\x49\xa3\x3d\xd8\x3b\x65\x36\xcd\x74\x70\xb8\x5e\xdc\xc5\xef\x40\x89\x91\xe3\xb6\x28\xff\x11\x5e\x99\x12\x54\x3e\xbd\x7f\x78\x84\x9a\x69\x30\x41\x17\xf3\x80\x76\xcb\x6b\x8e\xc0\x0b\x50\xca\xac\xc9\x45\xc3\xad\x9d\xcd\x03\x45\x32\x69\x61\x95\xf1\xe1\x47\xa2\x15\x99\x2e\xe8\x5c\xae\x72\xe5\xc5\xd2\xbf\x97\xc4\x5e\xec\x33\x87\xdb\x10\xef\xb0\x22\x28\x8b\x14\x3d\xa5\x73\xb8\x33\x70\x8b\x39\xe9\x5b\x64\xfa\xe1\xb0\x0b\xc2\x3c\x13\x48\xc7\x81\x6f\xa7\xa9\xee\xc2\x4e\xc4\x00\xd4\x39\xe4\xac\x85\xea\x88\x7c\x28\x28\xe9\x84\x46\x4a\xac\x9c\xb8\xaf\x47\x4f\xe2\xf4\x9d\x9c\x32\xc4\xce\xa3\x2f\x79\x9c\x61\x58\x56\x3d\x5b\x55\x4c\xed\x8a\xc5\xac\x2d\xae\x12\x6a\xcf\x19\x43\x88\x11\x31\x5f\x62\xf3\xc2\x9a\xe0\x12\x63\x82\x9d\xcf\x1e\x10\x92\x7a\x4d\xa4\x3b\x7f\x22\xfa\x6d\xb3\xac\x25\x76\x13\x29\xe0\x33\xf4\x91\x14\x53\xd4\xe6\x24\x9f\x8d\x48\x27\x43\xbc\x51\x60\x3f\x95\x63\x06\x1a\x57\xa4\x1f\x48\x53\x72\x0a\x45\xbf\x5e\x32\x3a\xfb\x9e\x3f\x3e\x51\xf1\xb7\xf6\xea\x18\xb8\x81\x00\xfc\x5e\x92\x3b\x80\xdd\x91\x93\x58\x26\x2f\xb6\x69\x54\x3f\x43\x15\xa0\x64\x4a\x25\xe9\x71\xa0\xd6\x01\xe4\xba\x6d\xb7\x33\x9b\x07\x00\x1a\x53\x57\x46\x8e\x3e\xc9\xde\x3f\x49\xae\xe0\x63\xe5\x1e\xd1\xfc\x74\x53\xa5\xbc\xe2\xa0\x6a\x04\x81\x6b\x60\x2a\x33\xe5\xa7\x7e\xd7\x1d\x8f\x19\x75\x56\x02\x3a\x82\x9b\xfb\x77\x94\xf6\xed\x89\x8a\xa3\x73\x78\xe8\x59\xa1\x3c\xe5\xbd\x0a\x9d\xa8\x74\x33\x20\x76\x95\x94\xeb\x27\xe2\xbb\xbd\x44\x63\xd7\x83\xca\x70\x55\x7e\xa6\x80\xb0\xa5\x43\xac\x54\xad\x08\x8d\x21\xe0\x28\xd4\x38\xb1\xf8\x00\xc9\x2d\x1d\xc2\xf6\xaa\xa0\xf5\xae\x1c\xf1\x85\x38\xfa\xc2\xe6\x38\x66\xc2\x7
0\xe0\xe9\xd9\x24\xd3\x1e\x63\x4e\xd7\x68\x35\xf4\xf8\xc4\x40\x82\x81\xe2\xaa\x2d\x10\x4b\xc9\x44\xc0\x50\xa6\x1a\xe3\x60\x51\x68\xd5\x13\x65\xc7\xe1\x6d\x3f\x88\xd0\x57\x44\x9e\x8f\x1a\x88\x17\xa8\x61\xcf\x76\x75\x5b\x3a\x5c\x73\x74\x06\x89\xa7\x4c\x15\xa3\x0a\x1c\x53\x4b\xdd\xe6\x7c\x91\x26\xf3\xd8\x9e\x86\x08\xba\x33\x53\xb8\xb7\x5e\xfe\xbc\x7f\x52\x7c\x36\x83\xb4\x87\x78\xd9\x3b\x4b\x7c\x6f\x7d\x58\xff\x2a\x30\x45\x01\x5f\x00\x52\xdc\x10\xc2\xce\xc4\x08\x17\x3d\xdb\x5d\x11\xcf\xe1\x6e\x3d\x12\x35\xd0\xb2\x90\xd0\xba\x33\x60\x5d\x8d\x46\xe8\x68\x23\x9b\xc8\x20\x2f\x39\x34\x33\xc6\x9a\x19\xe5\x85\x3f\x0c\xab\x0e\x15\xff\x0e\x87\x08\xb1\x70\x69\x63\xd8\x66\x36\x06\x7f\x47\x94\x28\x06\x3c\x4a\xaf\x16\x9f\xc4\x6e\x5b\x63\x42\x29\xa4\x65\x80\x03\x47\x48\xb2\x77\xe8\x69\xa3\x12\xc8\xc9\x6d\x08\x0a\xc9\xdd\x97\x98\x75\x28\xb3\xc6\x31\x92\x5f\xdb\xc4\x06\x7c\x24\x14\x93\x50\x4b\x5f\x50\x7c\xe2\xfa\x98\x98\x73\x2c\xc4\x3d\xfe\x90\x2c\x1b\x50\xfe\x13\x0a\x54\x8e\xe7\x70\x13\x8e\x67\xba\xdf\x49\xda\x7b\x94\x09\x76\x6a\x93\x17\xca\x8a\x41\x12\xe6\x0e\xb5\x54\x02\x89\x3b\x03\xa4\x43\x5d\xe8\x25\x6b\xd7\xcf\x4a\xe4\x14\xf6\x99\xf4\x3a\x92\xb5\xd6\x8a\x74\xe8\xf4\x27\x5b\x3a\x4c\xa6\x1d\x37\xea\xa5\x29\xcb\xef\xcc\x24\xd6\x91\x67\xde\xdb\x14\x1d\x6b\xf4\x01\x26\xe1\xd9\x64\xfe\xac\xaa\xf6\x52\xbf\xb0\xda\x0e\x96\x16\x4c\xd3\x70\x29\x80\xfa\xe3\x05\xf9\x7f\xd0\x2f\x1c\xad\xcf\x6e\xed\x38\xc2\x27\x5a\xc7\x24\xd7\x6a\xb2\xd6\xe4\xc8\x84\x06\xd3\xf6\x76\x51\x67\x45\x6a\xfa\xb3\x69\x55\x4b\x28\x85\xbd\xf2\x59\xb7\xcb\x3b\x87\xce\x70\xb4\x0c\xc4\x48\x57\x19\x95\x64\x9f\x6a\xf1\xa3\x5f\x37\xda\xc4\x6c\x5f\x4b\x38\x05\x32\x4e\x25\x19\xa5\x3d\xd8\x06\xb1\xa5\xdf\x8f\xa7\x0b\xf1\x8d\x68\x98\x9e\xce\x61\xd4\xb4\xe3\xe5\xfc\xfc\xb5\xc2\x80\xba\x37\x1f\xef\xea\x0b\x85\x78\x8f\x40\xb5\xba\x03\x25\xe7\x82\x72\x73\xd4\xf5\x42\x41\x6e\x9b\x0d\xed\x8a\x7c\xbc\x7c\x38\x9e\xae\x86\x8b\x4d\xe3\x5d\x63\xe2\x0f\xa7\xd5\xd1\x94\x7a\x5e\xf8\xa3\xec\x6d\xd1\x71\x87\x4a\xe3\x4a\xd7\x67\xc5\xd8\x5c\x0c\xe6\xeb\x78\x8a\x6c\x94\xb9\x8e\x4e\x44\x43\xf5\xe2\xa2\xa6\xf3\x92\xb6\x53\x5a\xcb\xe8\xb2\x83\x4b\x84\xdf\xc0\x82\xcb\x7a\x4f\x39\xeb\xb1\x7f\x74\x68\x58\xd5\x57\x8e\x63\x95\xec\xe4\xf0\xc7\x1e\xbc\xca\xe3\xe9\xb5\x71\x3b\xf0\x0d\x49\x4a\xc3\x05\xcb\x08\x51\x00\x6b\xa8\x8e\xd4\x50\x5d\xac\xcf\xe8\x6c\xa2\x69\x8f\x0b\x3b\x2f\x19\x6b\xeb\x72\xf4\x4b\x48\xd1\xd3\x4c\x24\x1e\x85\xe5\x73\xb8\xcf\x79\x35\x48\xf6\xc8\x62\x95\x55\x6f\xc2\xfa\xe1\x4a\xe5\xc4\x8c\x9b\x97\x69\x73\x03\x59\x99\xa3\x44\x16\xa6\x21\x86\x2a\x22\xa0\x4c\xaa\x12\x0c\xb7\x71\x29\x79\x54\x7a\xac\xa9\x03\xc0\x95\x2d\x63\x64\x1e\xdd\xe3\xd5\x2c\xec\x08\x79\x28\xef\x9e\xd1\x2d\xb6\x05\xb2\x4d\x80\xec\x9a\xeb\x9a\x83\x0f\xfc\x08\x49\x9f\xdf\x76\x8d\x4a\x5a\xdd\x7c\x55\x25\xa2\x11\x72\x1a\xa2\xc6\xae\xe1\xd1\x95\x34\x85\x5f\x51\x33\x4d\x47\xcd\xf0\xd9\x6c\x8d\xdd\xbf\x9e\x3e\x61\xe1\x8b\x70\x3f\x14\x41\xea\x46\x8f\x57\x10\x25\x34\x93\x1f\xd1\x67\x17\x16\xbc\xeb\xbb\xaa\x4f\x0a\xa7\x87\xd0\x59\x14\x8a\x12\xea\xdc\xd5\x83\x32\xec\x09\x87\x02\x36\x2e\x27\xe3\x95\xa3\x6a\xd7\x34\x5e\x2b\x57\xe7\xb4\xe3\x2d\xbf\x74\xa6\x80\xf1\x25\xc8\x00\xc5\x7f\x3c\x7c\xb8\x5f\xfc\xdd\x56\xed\x31\x26\x09\x71\x55\x7e\xa4\x77\x9d\x02\x97\x49\x06\xc8\xf5\x6d\xeb\x43\x28\x4c\x39\x1a\xb5\x26\xf6\xf3\x8a\x0f\x39\xfe\xfa\xf6\xdb\x10\xae\xbf\x5a\x07\xf4\x84\x79\xa1\x69\x0a\xaa\x3a\xc4\xd5\x77\xe6\xad\x06\x2c\x40\xd3\x50\x0d\x5d\x95\x32\x03\x74\x11\x0a\x9b\x56\x10\xec\x83\xea\x1e\xb7\x04\xb6\x52\xbd\x24\xd0\x6a\x4b\x4b\x98\x70\x41\x49\x4b\xdc\x3f\x0c\xe6\xf4\xe7\x64\x80\xf2\xff\xed\x33\x72\x04\x13\x
59\x38\x89\x42\x36\x0d\xae\xcc\xb5\xc2\xa3\x12\x36\xde\x8b\x38\xb5\xd9\x90\x1b\x4c\xbb\xe1\xf6\x7f\x47\xc6\xff\x2c\xa7\x56\xb5\x06\x63\x5b\x64\x02\x71\xb1\x68\x41\x89\x5a\x2b\x4a\x9f\x09\xff\xf5\xed\xb7\x41\xc9\xbb\xf8\x49\xee\xa4\x27\x78\x1b\xcf\x5a\x8a\x05\xb1\x9f\xab\x03\x2e\x1f\x8c\xc7\x27\xe1\x96\xc8\x41\x69\x08\xe9\x70\xc6\xf1\x16\x32\xdc\x11\xb0\xcd\x09\xf6\xa4\xf5\x2c\xde\xc8\xa7\xb0\x8f\xd7\x05\xb5\x31\xc3\x21\x19\x0a\x74\x43\xdd\x49\xf7\x45\xd5\xe3\x87\x77\x1f\x96\x51\x42\x71\xb6\x8d\x11\xb1\x8c\xf5\xb0\x56\x06\x75\x75\x02\x53\x1c\x3d\x75\x80\xaa\xa8\x55\x46\xd7\xf2\x16\x92\x0c\x4d\xa8\x1f\x01\xf5\x75\xe9\x4b\x47\xf3\xeb\xef\xe9\x76\x4f\xdf\x27\x75\xc7\x99\xb7\x4b\xa7\xad\xf6\xff\xe8\x1d\xcd\x8b\x15\x0d\xef\x7f\x2f\x54\xf4\xbe\x15\x11\x83\x8a\x6e\xcb\x15\x39\x43\x9e\x82\xae\xa9\x4d\x58\xd4\x4c\xa8\xf0\xbc\xb0\x3b\x72\x3b\x45\xfb\xc5\xde\xba\xad\x32\x9b\x99\xb8\xef\x2c\xfa\x07\x2f\xc2\x3b\xe4\xc5\x4f\xe1\xcf\x77\xeb\xc5\x05\x26\x2f\x52\x2e\x6c\xf8\x2b\x34\x14\x3e\xbc\xf8\x2e\x05\xeb\x43\xeb\xcb\x4e\x87\xd7\x0f\x31\xe1\x24\xa7\xfb\x25\x8c\xf6\x99\x4a\xb2\xfa\xcd\x73\x95\xaf\x87\x63\x30\xc7\x34\xa6\x7a\x34\x87\x1f\xee\xf2\x02\x70\xe9\x44\xaa\xc3\xac\xfa\x00\x62\x86\x26\x95\xff\x59\xb1\x97\xf9\xef\x42\xb4\x54\x17\x87\xfc\xe7\xbb\x77\x7f\x4d\x20\x94\xea\x05\xf1\x7d\xfc\xee\xe3\x2d\xea\x22\xc3\x37\xc7\xb9\xd0\x1b\xce\xaa\xaf\x3d\x5a\x8f\x01\xe2\xab\xce\x25\x78\x57\x46\x63\xb3\xb7\x4e\x5a\xfa\x38\x73\xec\x2c\xa5\x6d\x28\x3c\xa5\xf7\xa7\x9f\x79\x4c\x62\x9d\xaa\xbf\xe2\x08\x3f\x5b\xb7\x05\xf0\xf5\xdb\x55\xa4\x4a\xe9\x97\x5a\x1a\x99\xfc\x6f\x00\x00\x00\xff\xff\xa3\x4b\xc0\x37\x2a\x23\x00\x00") + +func operatorsCoreosCom_operatorsYamlBytes() ([]byte, error) { + return bindataRead( + _operatorsCoreosCom_operatorsYaml, + "operators.coreos.com_operators.yaml", + ) +} + +func operatorsCoreosCom_operatorsYaml() (*asset, error) { + bytes, err := operatorsCoreosCom_operatorsYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "operators.coreos.com_operators.yaml", size: 9002, mode: os.FileMode(420), modTime: time.Unix(1586375941, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +var _operatorsCoreosCom_subscriptionsYaml = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x7d\x7f\x73\xe3\xb8\x91\xe8\xff\xf9\x14\x28\x6f\xaa\xec\x79\x67\xc9\x3b\xb9\xad\x5c\x9e\x2f\x95\x94\xd7\xf6\x6c\x7c\xbb\x33\xe3\x1b\xcd\xcc\xd6\x7b\xb9\xbc\x14\x44\x42\x12\x62\x12\xe0\x02\xa0\x6d\xe5\x72\xdf\xfd\x15\x1a\x3f\x08\x4a\x24\x01\xca\xb2\xc7\xbb\x31\xfe\x99\xb1\x44\xb6\x80\x46\xa3\xbb\xd1\x3f\x71\x45\x3f\x13\x21\x29\x67\xa7\x08\x57\x94\xdc\x2b\xc2\xf4\x5f\x72\x7a\xf3\x3b\x39\xa5\xfc\xe4\xf6\xf5\x9c\x28\xfc\xfa\x57\x37\x94\xe5\xa7\xe8\xbc\x96\x8a\x97\x1f\x88\xe4\xb5\xc8\xc8\x05\x59\x50\x46\x15\xe5\xec\x57\x25\x51\x38\xc7\x0a\x9f\xfe\x0a\x21\x86\x4b\x72\x8a\x64\x3d\x97\x99\xa0\x95\x02\x68\xbc\x22\x02\x2b\x2e\xe4\x34\xe3\x82\x70\xfd\x4f\xf9\x2b\x84\x30\x63\x5c\x61\x78\x44\xbf\x89\x50\x4e\x65\x55\xe0\xf5\x3b\x00\x31\x0b\x40\x98\x6f\x89\xff\xdb\x7f\x3b\x27\x12\x49\x22\x6e\x69\x46\x50\x86\x15\x2e\xf8\x12\x29\x8e\x30\x32\x73\x44\x98\xe5\x28\x5b\x61\xc6\x48\xa1\x3f\x17\x24\x23\xf4\x96\xa0\xba\xca\xb1\x22\x12\xc0\x22\xb4\xe0\x02\x55\x38\xbb\xc1\x4b\x22\xa7\xbf\x92\x15\xc9\xf4\x74\x96\x82\xd7\xd5\x29\xea\x99\xfb\xad\xc3\xdb\xed\x6b\x5c\x54\x2b\xfc\xba\xf9\x0c\x16\x33\xb1\x88\x08\xbe\x46\x30\x53\x92\x9f\x22\x25\x6a\x62\x3e\x50\x5c\xe0\x25\xf1\x9f\xc8\x8c\x57\xe4\x14\x69\x04\xc8\x0a\x67\x24\xb7\x08\xb5\xf8\xa9\x8a\x5a\xe0\x62\x03\xbb\x06\x10\x65\xcb\xba\xc0\xa2\xfd\x1d\x7c\x65\xf6\x6e\x0b\x9b\x05\x95\xea\xfb\xad\xaf\x7e\xa0\x52\x19\x80\x2b\x2e\xd4\xbb\xe6\xa7\x27\x1a\x70\xf3\x3f\xf3\xab\x19\x56\x64\xc9\x05\x6d\x1e\xe2\x05\x6c\x6c\x9e\x03\x61\xe0\xe2\x5a\x50\xa6\x88\x38\xe7\x45\x5d\xb6\x11\x73\x6d\x10\x0e\xef\xa9\xb5\x5e\xb5\x54\x82\xb2\xe5\xf6\x56\x7f\x5c\x11\xb7\x3d\x6e\x75\x73\x92\x23\xc5\xe1\xd1\xff\x98\xbd\x7f\x77\x8d\xd5\xea\x14\x4d\xf5\xce\x4d\x35\xf4\xe0\x67\x66\x40\x07\x89\xbf\xe2\x28\xc8\x12\x8f\xa6\x0b\xb5\x22\x48\xc3\xa5\x0b\x4a\x72\x37\x8d\xce\x1f\x96\xee\x97\xdc\x4f\x9f\x1b\xba\x4b\xfd\x6d\x4b\xa5\x7c\xe1\xa8\x53\x13\xac\x5f\x6f\xdf\x72\x33\xff\x23\x95\x20\x40\x5f\x9f\xd8\x0d\xe3\x77\xec\x0d\x25\x45\x2e\x4f\xd1\x02\x17\x12\x48\xab\x9e\x0b\x7b\x72\xed\x6e\x7d\x85\xa4\xc2\xaa\x96\x88\x30\x3c\x2f\xf4\xef\xe9\xb5\x9a\x8f\x82\xa7\xa7\x96\x52\xf5\xe7\xa7\xe8\xbf\xff\x47\x13\x3a\x2e\x68\x0e\x07\xd7\x40\xe2\x15\x61\x67\xd7\x57\x9f\xff\x75\x96\xad\x48\x89\x4f\xed\xb9\xea\x3a\xb0\xf0\x17\xba\x21\xa4\x92\xcd\xd1\x42\x75\xa5\x17\xab\x57\x8d\xe6\x6b\xa4\x04\xce\x6e\x28\x5b\x02\x4e\x96\x80\x08\x0b\x11\xa1\x73\xb3\x45\x72\x6a\x3f\x31\x88\xe5\xf3\xbf\x91\x4c\xd9\x8f\x04\xf9\xa9\xa6\x82\xe4\x6e\x1a\x13\xe4\xf8\x93\xff\x40\xa3\xce\xfe\x51\x09\x3d\x0d\xe5\x69\x58\x8f\x80\x2f\xfa\xcf\x36\x96\x73\xa8\xd7\x6b\x9e\x41\xb9\xe6\x84\x16\x7f\x96\x07\x90\x1c\x49\xc0\x85\xde\x4f\xb5\xa2\x12\x09\x02\x1b\xc4\x0c\xc3\x0b\xc0\x22\xfd\x08\x66\x76\x0d\x53\x34\xd3\x9b\x28\xa4\x3e\x81\x75\x91\xa3\x8c\xb3\x5b\x22\x94\x66\x5d\x7c\xc9\xe8\xdf\x3d\x64\xa0\x0f\xfd\x93\x85\x26\x16\xd5\x82\x08\x87\x8e\xe1\x42\xef\x54\x4d\x8e\x81\x0d\x96\x78\x8d\x04\xd1\xbf\x81\x6a\x16\x40\x83\x47\xe4\x14\xbd\xe5\x82\x20\xca\x16\xfc\x14\xad\x94\xaa\xe4\xe9\xc9\xc9\x92\x2a\x27\x09\x32\x5e\x96\x35\xa3\x6a\x7d\x92\x71\xa6\x04\x9d\xd7\x7a\xe3\x4e\x72\x72\x4b\x8a\x13\x49\x97\x13\x2c\xb2\x15\x55\x24\x53\xb5\x20\x27\xb8\xa2\x13\x98\x38\x33\x02\xa0\xcc\xbf\xf2\xc4\x77\x18\xcc\x74\xeb\x58\x78\x86\xd5\x8b\x77\xcd\xb3\x10\x95\x9a\xc5\xc3\x6b\x66\xfe\x0d\x7a\xf5\x47\x1a\x2b\x1f\x2e\x67\x1f\x91\xfb\x51\xd8\x82\x36\xce\x01\xdb\xcd\x6b\xb2\x41\xbc\x46\x14\x65\x0b\x22\xcc\xc6\x2d\x04\x2f\x01\x22\x61\x79\xc5\x29\x53\xf0\x47\x56\x50\xc2\xda\x48\x97\xf5\xbc\xa4\x4a\x02\xfd\x11\xa9\xf4\xfe\x4c\xd1\x39\x08\x3
9\x34\x77\x32\x27\x9f\xa2\x2b\x86\xce\x71\x49\x8a\x73\x2c\xc9\xa3\xa3\x5d\x63\x58\x4e\x34\x4a\xe3\x88\x0f\xc5\x78\xfb\xc1\xd6\xf9\x42\xc8\x89\xc8\xce\x1d\x0a\x0f\xfa\xac\x22\x99\x3f\x1e\x98\xa1\xb3\xaa\x2a\x68\x06\x27\x00\xa9\x15\x56\x28\xc3\x4c\xe3\x86\x32\xa9\x70\x51\x80\xbc\x1b\xfc\xe5\xed\xd3\x6d\x0e\xb4\xe5\xfa\xcd\x07\xb2\xe1\xfb\xed\x8f\xbc\x6c\x0d\xbe\xeb\xe2\x01\x7a\x58\xe6\xda\xfe\xb0\x07\x79\xf0\x3c\x67\x0b\xba\xdc\x7c\xbc\x17\x3b\xe7\xf0\xb8\x7e\x4b\x61\xca\xa4\x7d\xbd\x16\x06\x3f\x8d\xd4\x59\x70\xb1\x01\x12\x69\xf2\x0f\x20\x4d\x3b\x67\xb8\x85\xbb\xa1\xb5\xea\x41\xd8\xed\xf6\x87\x1b\x0b\xb8\x64\xb7\xe6\xf8\x69\x05\x42\xb3\x2e\xc2\x6e\xa9\xe0\xac\x24\x4c\xa1\x5b\x2c\xa8\x95\x25\x1c\x49\xa2\x10\xd5\xfb\x4c\x3a\x60\x22\xb7\x6c\x22\xba\x0e\x49\xc7\x1b\x66\x4d\x58\x08\xbc\xee\xf8\x96\x2a\x52\x76\xac\xa8\x6b\xfa\x9f\xb1\x08\xce\xbd\x26\xcb\xae\x25\x20\xfb\x40\x27\x48\xcd\x61\x11\x46\xe7\x7e\x09\x9d\x4f\x0d\xec\x82\x19\x5d\xb4\xdc\x8c\x2d\xaa\x6e\xc6\xd0\x26\x9a\x01\x2a\x48\xcf\x77\x1b\x28\xd1\x27\xc2\x88\x29\xd2\x89\x89\x29\x7a\x5b\x4b\xd8\x1d\x8c\xce\xff\x7a\x75\x71\xf9\xee\xe3\xd5\x9b\xab\xcb\x0f\xdd\x8b\x46\x43\x07\xa4\x19\xc0\xb5\x13\x27\x78\xf8\xd9\xed\x89\x20\x0b\x22\x08\xcb\x88\x44\xbf\x3e\xfa\x7c\xf6\xe1\xaf\xef\xce\xde\x5e\xbe\x42\x58\x10\x44\xee\x2b\xcc\xf2\x16\x07\xd9\x1c\xb5\x74\xc2\xa1\x12\xe4\x96\xf2\x5a\x5a\xde\x94\xf7\x10\xf1\x00\xf5\x9a\xe1\x69\x18\x24\x2c\x66\x6b\x7f\x19\xe9\x04\x38\x45\x57\x0b\x84\xfd\xdf\x43\x80\xfd\x89\xd0\x22\xac\xb8\x25\xf9\x31\x4c\xdc\x23\xc0\xce\x0d\x51\x56\xd5\xca\x89\xc2\x3b\x5a\x14\x03\x40\xf5\xf9\x62\x46\xa7\xca\xa7\xa0\x74\x86\x48\x94\x6b\xa6\xf0\xbd\xe3\xc8\x44\x66\xb8\x22\x39\xba\xa3\x6a\x35\x00\x12\xa3\x9c\xd7\x7a\x67\x7e\xfd\xeb\x63\x44\xc9\x29\xfa\x75\x00\x72\x8a\x2e\x2d\x94\x60\xdf\xf4\x1c\x11\x23\xb7\x64\x9b\xa7\xb5\x66\xea\xf6\xf3\x18\x09\xb2\xc4\x22\x2f\x88\x94\x9a\x4a\xef\x56\x44\xad\x88\x51\xcc\xfd\x59\x25\xf7\x54\x0b\xdc\x0e\x3e\xd9\x0c\xc6\xd5\x14\x5d\x90\x05\xae\x0b\x90\xcd\xe8\xe0\x60\x7a\xf8\x60\x12\x7e\x23\x78\x99\x48\xc6\xb3\xf6\xad\xa2\x8b\x42\x0e\xa5\x81\xda\x7f\xb4\x50\xc8\x2c\x25\xc9\x11\x5d\x58\x0d\x88\x4a\xbd\x44\x44\xca\x4a\xad\x63\x47\x73\x80\x27\xa1\x24\xf6\x82\xbc\xb4\x7b\x8b\xab\xef\xc9\xfa\x03\x59\x0c\x3d\xba\x89\x09\x52\x90\x4c\xb3\x5e\x74\x43\xd6\xa0\xfe\xa2\x73\x07\x6c\x68\xe9\x89\xd3\x47\x51\xd6\xea\xc6\x44\x4f\x60\xf0\x89\x34\x54\xe8\x71\x43\xd6\xb1\x47\x3a\xae\x7d\x1a\x01\x20\x2b\x35\x46\x86\xd7\x8e\xd2\xc8\xd2\x8d\x61\x09\xd0\x39\xa1\xc3\x50\x14\xd8\x33\xab\x3a\x55\xd4\x9b\x7a\x4e\x04\x23\x8a\x80\x96\x9a\xf3\x4c\x6a\x05\x35\x23\x95\x92\x27\xfc\x56\xf3\x40\x72\x77\x72\xc7\x85\xbe\xc6\x4d\x34\x0f\x99\x98\x3d\x93\x27\x60\xce\x38\xf9\x0a\xfe\x89\xce\x0e\xa1\x8f\xef\x2f\xde\x9f\xa2\xb3\x3c\x47\x1c\x8e\x7d\x2d\xc9\xa2\x2e\xd0\x02\x6e\xb7\xd3\xe0\xa6\x76\x0c\xf7\x86\xe3\x04\x90\x35\xcd\xff\xd8\x7f\xf0\xdd\x18\x81\x69\x5e\x19\x63\xc7\x48\x6c\xcf\x40\xc1\x5b\xb7\x38\x9a\x3f\x06\x88\x0b\x44\x55\x0a\x86\x34\x05\x95\x56\x40\x5b\x71\x96\xb8\xb8\x39\xe7\x05\xc1\x6c\xe0\x69\x40\xf3\xb8\xb3\x7d\xd8\x1c\x6e\x78\xdb\x91\x53\xc5\xf3\x53\x24\xeb\xaa\xe2\x42\x49\x7f\xd5\x00\x73\x4d\x6c\xd3\x5a\x0f\x83\xee\x7e\xdc\x7c\x56\xe0\x39\x29\x64\xf0\x41\x60\x55\x8c\x01\x36\x06\x23\x9e\xc3\xa5\xe0\xd8\xfc\x69\x05\xf8\x59\x96\xf1\x9a\x29\xfb\x05\x18\x3f\xa6\x2b\x2e\xd5\xd5\x75\x14\xa8\x79\xb8\xe2\xf9\xd5\xf5\x71\xeb\x2f\x39\x20\x6f\xd0\x63\x30\x37\xd8\x80\x6b\x3c\x28\xc1\xc7\xb0\xb8\x6e\xbb\x48\xf7\x68\x11\x85\xb3\x94\x58\x5a\xb0\xa6\x11\xfd\xdf\x37\x6e\x8a\x88\xa6\x10\xfb\x9d\xa0\x4a\x11\x06\xda\x0f\x11\xa5\xd6\x08\x8e\x35\xd5\x37\x
42\xfd\xf6\xf5\xc1\x5e\xd9\xa8\xc7\xe1\xc8\x25\xc3\x9a\xec\x7a\xcd\x41\xf0\x6c\xde\xe9\x6e\xfe\x86\x97\xb0\xf0\xb3\xeb\x2b\x67\x5b\xda\xdb\xf2\x9c\x75\xe4\xcd\x83\x4e\xb9\xb7\xb1\xd8\xc5\x7a\x9d\xf8\x14\x71\x56\x0c\x4b\xd7\x66\x0e\x12\x15\x14\xac\x27\x5a\x91\xf6\x16\x94\x23\xf3\xe1\x34\xab\xea\x63\xfb\xc0\xb4\x24\x25\x17\xeb\xd8\x29\xb4\x0f\x93\x6a\x45\x4a\x22\x70\x31\xb1\xa6\xf6\x63\x0f\xdc\x00\xf5\x7f\x19\xb0\xb1\x03\x10\x4c\x6e\x1b\xb6\xb9\x88\x64\xb5\xd0\x62\xb3\x58\x3b\x6e\x47\xf2\xa7\x3e\xf7\x0e\xa9\x7b\x3a\xf6\x7e\x47\xdf\xed\xa0\x52\xf8\x3b\xb2\xb5\x8a\xbb\x35\x80\x1e\x7c\xcb\x8b\xba\x24\x51\x3e\x8d\x02\x21\x0b\xef\x11\x76\xab\x75\x66\xb9\x57\x31\x9e\xd3\x5b\x2a\xb9\xd8\x49\x8a\x53\x6b\xfe\xe5\xb5\xd2\xf7\xb1\x05\x17\x25\x56\xfe\x52\x7d\x5f\x71\x99\x74\xca\xfd\x69\xd8\x60\x6a\xaf\x0f\xa2\x2f\x57\x58\x29\x22\xd8\x29\xfa\x7f\x47\xff\xf5\x2f\xff\x98\xbc\xfa\xe3\xd1\xd1\x9f\xbf\x9e\xfc\xef\xbf\xfc\xcb\xd1\x7f\x4d\xe1\x3f\xff\xeb\xd5\x1f\x5f\xfd\xc3\xfd\xf1\x2f\xaf\x5e\x1d\x1d\xfd\xf9\xfb\xb7\xdf\x7d\xbc\xbe\xfc\x0b\x7d\xf5\x8f\x3f\xb3\xba\xbc\x31\x7f\xfd\xe3\xe8\xcf\xe4\xf2\x2f\x89\x40\x5e\xbd\xfa\xe3\xaf\xa3\x53\xc3\x6c\xfd\x3e\xc2\x5c\x10\xd0\xad\xd9\x2c\xca\x14\x59\x0e\x5e\x0f\xdb\xcf\x27\x6d\x2e\x42\xf7\x93\x46\x69\x9d\x50\xa6\x26\x5c\x4c\xcc\xab\x81\x43\xae\x7f\xb8\xad\x19\x4b\xff\x1f\xdc\x99\x0d\x8c\xd1\x4e\x10\xec\x8d\x7c\x25\xc9\x04\x51\xfb\xb8\x87\x19\x48\x4e\x42\x55\x3c\x3f\x94\x88\x75\x98\x4c\xfb\xa6\xfa\x0b\xbb\x9a\x39\x85\xc5\x60\xa5\x91\xe0\x0b\xc1\xcb\x29\x02\xc3\x58\xc2\xb1\x06\xd3\x19\x38\xcd\x1c\xa4\x1b\x32\x70\x47\x77\xe3\xe5\xb2\xf7\xcb\xbe\xec\xcd\x0c\x2d\x98\x9b\x5e\x94\xdc\xcd\xd8\xef\x4d\x8f\xb0\xdb\x3e\x03\xd6\xa6\xd5\x5c\x3f\xd7\x36\xfc\x3b\xc5\x4d\x71\x54\xf1\xaa\x2e\xb0\x6a\x19\xb5\x3a\xa7\xb6\x69\x5b\x0d\xbd\x00\xf6\xcc\x35\x46\x59\x4d\x4a\x60\x67\xb7\x7c\xb3\xec\x3b\x6d\xdb\xa6\x69\x74\x56\x14\x88\x32\x73\xe6\x00\x28\xd8\x1d\xc1\x9e\x6a\x74\x32\x84\x8d\xe9\xff\xb6\x6f\xaa\x77\x2b\xb2\x31\x45\xbd\x7c\xa9\xb0\x50\x94\x2d\xa7\xe8\x47\xfd\xbd\xe1\x9c\xd6\xfc\x48\x19\x2a\xeb\x42\xd1\xaa\xc7\xb4\xeb\x65\xbb\xb1\x5d\x16\x35\x41\x58\x4a\x9e\x51\xac\xec\x6a\xad\x3f\x57\x2a\xb7\x64\x98\xb5\xc2\x37\x60\xb2\xce\x48\x4e\x58\xd6\x63\x1c\xfc\x0c\x6e\x5c\x8f\xbb\xf9\x5a\xaf\xee\x92\xdd\x1a\xb8\x18\xe5\xb5\x71\xbd\x19\xbe\x96\x0e\xf7\xaa\x2c\x6b\x05\x6e\x80\xc7\xf4\xc9\x68\xea\xb2\xf6\xd1\xc0\x35\x03\xac\xd7\x5f\x2e\x30\x78\x94\xf8\xa2\x31\x92\x74\xf3\x94\xa8\x20\x8a\x0b\x0e\x6f\xda\x1c\x94\xa7\x5b\x12\xa3\xb1\xde\xb4\x25\xc5\x53\x58\x63\xe3\x12\xe0\x39\x73\xff\xb1\x9c\x3f\x81\xaf\x27\xf2\xf4\x34\x7e\x3e\xc2\x70\x37\x86\x43\xa7\xd8\xe1\x2a\x41\x16\xf4\x3e\x91\x0a\xcf\x58\x73\x51\xa2\x39\x61\x4a\x5f\x4d\x04\xb0\x68\x41\x2a\xc2\xc0\x0a\x41\x70\xb6\x8a\x88\x1b\xcb\x9d\x1b\xab\xfc\x63\xf8\x00\x8d\x2e\x34\xee\x88\xcd\xba\x34\xb1\x97\xf3\xf5\x8b\x3f\x5f\x76\xdf\xf7\x77\xb8\x18\xcf\x89\xb9\xff\x74\x5f\xf9\x37\x76\x33\x78\xda\x46\x1f\xb9\xbf\xee\x56\x34\x5b\xf9\x89\xe9\x9b\x64\x67\xc0\x06\xcc\xc8\x5c\xa9\x34\xf5\x2e\xa8\x42\x5c\xeb\x0f\x7a\x1e\x53\x34\xeb\x80\x56\x62\x95\xad\xec\x13\x87\x87\xdd\x3b\x6c\xac\xdf\xde\xa7\x68\x81\xcf\x8d\x89\x35\xaf\x0b\x92\x23\x17\x6a\x63\x7e\x68\x24\x01\xb6\x42\x52\x4e\xb0\x94\x74\xc9\x26\x15\xcf\x27\x1a\xda\x49\x17\x81\x44\x8e\x5c\x18\x1a\x3a\x7c\xec\x06\xe9\x6b\x23\x94\x71\x68\xeb\x3e\x78\x0b\x63\xa0\x59\x64\xbc\xac\x6a\x45\x02\xf3\xa3\xb7\x4b\xcd\xbb\xf9\x22\xc4\x82\x05\xda\x6a\xa3\x17\x3d\x0c\xa7\x25\x66\x78\x49\x26\x76\x42\x13\x3f\xa1\x89\xff\xad\x5d\xd0\x1c\xe3\x69\xc6\x3c\xda\x77\x26\xdb\x08\xfc\xc1\x18\x66\xcd\x87\x73\x6b\xe8\x2a\xf1\x3d\x2d\
xeb\x12\xe1\x92\xd7\x0c\xb4\x32\xbb\x82\xde\x23\xd9\xa0\x1a\x17\x05\xbf\x23\xf9\x97\x41\x5c\x02\xf2\xd0\x28\x3a\x45\xcf\xd3\xf8\x16\x31\xba\xa5\x1a\xdb\x12\x8d\x6c\x3b\x19\xd7\x9c\x25\x3d\x8d\x0c\x3f\x38\xa7\xc0\x06\x21\x52\xb6\x33\x21\xba\x33\x0f\x71\x39\x1e\x3e\x95\x88\x97\x54\x29\x6b\xa3\xc6\xcd\xa9\xef\x37\x38\x50\xd5\x32\xd8\xda\x23\x43\x17\x86\xf5\x52\x89\xc8\xbd\xbe\x7e\x51\x70\x0b\x38\xb7\xcf\xb1\x11\xc6\x77\x54\xf6\x4f\x56\x71\x7d\x8f\xa3\x65\x55\x90\xd2\x05\x0f\x4f\xdc\x25\xcf\xc4\x8a\xbc\x1c\xa3\x7f\xde\x63\x24\x53\x35\x98\x50\x79\x31\x46\x86\x39\x29\x1a\x25\x06\x52\x60\x78\x2e\xad\xf6\xe0\x48\xa4\x3b\x12\x13\xa1\xcb\x7b\x2a\x21\xd2\xfa\x03\x01\xab\xc2\x8c\x28\x89\xee\x56\x5c\x12\x03\x05\x0b\x62\x61\x1b\x53\x04\x08\x4f\x67\x81\xe9\x0b\xa9\xe3\x10\x24\xbc\x58\xb4\xdf\xca\x49\x55\xf0\x75\x09\xda\xf3\x95\x0a\x35\x23\xaf\xf0\x90\xb2\x2a\xb0\x22\x83\x2a\x52\xbf\xf5\x62\x67\x19\x0a\xb3\xb8\xbc\xd7\x3a\x85\x6c\x72\x98\x22\xfb\xb0\xf9\x52\xdb\xa4\xb6\xb1\x2b\x96\x3f\x95\x10\x9f\xde\x4b\x34\x1f\xe1\x86\xd1\x3c\x09\xd8\x3f\x7b\x77\xd1\xb7\x79\x31\xb3\x0d\x1a\x36\xdd\x6c\x2d\xe9\x6c\x60\xda\x1b\xba\xb2\xe6\x86\x03\x3a\xbb\x8f\x84\x36\x39\x09\xc7\xc6\xbc\x76\x6c\x83\x2b\x7d\xbe\x88\x61\xaa\x82\x14\x26\x4b\x66\x30\x42\xf3\x86\xac\xe1\x75\x9b\xe5\xf0\xb0\x9b\x62\xdc\x73\x31\xec\xb5\x98\xf8\x25\x3c\xf0\x32\x1a\x75\x68\xb4\x36\x48\xe3\xa0\x75\xec\x21\xe8\x4c\xe3\xd0\xf8\x36\xec\xe6\xe0\xaa\x2a\x68\xf4\x26\xa9\x78\x4a\x84\x5e\xc2\xb5\xd0\x20\x62\xc4\x32\xfc\xf6\x87\xc1\xdb\x7a\x2d\x87\xd2\x10\x83\x3e\x4f\x2b\x5a\x45\x17\xd0\x58\x12\x5d\xee\xcb\x67\xb0\x13\x37\xf9\x48\xfa\x04\x5d\xb1\x63\xf4\x8e\x2b\xfd\x0f\xb0\xbb\x18\x62\x34\x95\x5d\x70\x22\xdf\x71\x05\xcf\xef\x05\x4d\x66\x82\x23\x90\x64\x5e\x80\x63\xc7\xcc\x09\x07\x53\x7d\x90\x2a\x63\xa2\x90\x87\x4f\x0d\x0a\x76\x48\xc3\xba\x62\x88\x0b\x87\x0d\x6f\xbd\x96\xf6\x07\xdc\xdd\x97\x71\x36\x89\x84\x9f\x9a\x61\x7e\xbf\xf5\x0b\x97\x2e\x82\xb7\x85\xc3\xf0\xc7\x62\xe8\x6f\x4d\xc5\x4c\x03\x7d\xd4\x12\xc4\x7c\x63\x52\xb0\x0a\x9c\x91\x1c\xe5\x35\xa0\x03\x47\x40\x4a\x25\xb0\x22\x4b\x9a\xa1\x92\x88\x25\xd1\xea\x49\xb6\x4a\xd9\xd6\x21\xce\x6a\x46\x84\xbf\x86\xc0\x06\x68\x04\x84\xc9\x0f\x20\xdf\xd2\x85\x8f\x79\xde\x30\xe6\x12\x57\x9a\x3c\xfe\x5b\x73\x59\xc0\xf2\xff\xa0\x0a\x53\x21\xa7\xe8\x0c\x52\x4a\x07\x62\xd4\xc3\x77\xac\xb5\x30\x04\xaf\x21\x6b\xb5\xf7\xa7\x9a\xde\xe2\x42\x4b\x02\xa3\xc9\x12\xa3\xc7\xf6\x82\xe5\x8b\x2d\x11\x79\x6c\x35\x0b\xcd\xb5\x4c\x24\x15\x95\xe8\xe0\x86\xac\x0f\x8e\x5b\x64\xd4\xaf\x9c\x4b\x74\x70\xc5\x0e\x8c\x1c\xd9\xa2\x5e\x2f\x74\x38\x2b\xd6\xe8\x00\xbe\x3b\x98\x6e\x49\xd5\x7e\x0d\x31\x4d\xda\xee\x51\x4d\x1e\xa4\x0b\xc5\x0b\x22\xc2\xa4\xea\xf6\x68\x1b\x54\x9b\x67\x61\x19\x8d\xcb\x3f\x80\xf2\x78\x7e\x9f\x8f\x4e\x81\xd3\xa7\xb4\x99\x0b\x90\xa6\x52\x38\x5b\x41\x96\xaf\x9b\x4b\x2f\x07\xc0\x6c\x8d\xf4\x0e\x2a\x23\xd3\x80\x7c\xec\xe5\x50\x09\x5a\x15\x04\xfd\xde\xd3\xea\x31\x01\xfd\xf2\x0f\x4d\x2e\x48\x0f\x50\x80\xa2\x1f\xf1\x4c\xea\xf7\xee\x7f\x7f\xd8\x31\xdb\x27\x2e\xda\xcd\xe4\x12\xd5\xae\x4b\x78\x18\x51\x96\x83\x5b\xcf\x2e\x18\xf0\x60\xe0\x68\xdc\xc1\x32\xa6\xe8\x52\xb3\xc3\x01\x6e\x53\x12\xcc\xa4\x33\x38\x82\x67\xb0\x01\x23\xad\xab\x33\xb8\xb6\x5a\x03\x4e\x9c\x31\x6b\xa2\x7a\xc7\x67\xd6\x16\x79\x8c\xae\xc1\x12\xde\x7c\x02\x27\xf2\x1d\xbf\xbc\x27\x59\xad\x06\xb2\x1e\x12\xa4\xe5\xa0\x4a\xd4\x42\xdc\xf7\x8d\x3a\x64\xd6\xd9\x52\x87\x1a\xca\x4f\x50\x88\x14\xb7\xb8\xed\xc1\xe0\x0d\x59\x7b\x71\xeb\xd4\x30\x10\x4d\x43\xa1\x0b\x9e\xe2\x9c\x30\x33\xb2\xf1\xdf\x9d\x09\xb2\x9c\x53\x66\x26\x68\x7e\xd4\x6d\xf3\xd0\x3e\x14\x85\x67\x7a\x5a\x97\x2e\x0a\x33
\xb5\x87\x20\x3c\xae\xc1\xb5\xb0\xfe\x3e\x51\x7b\xb3\x59\xcd\x31\x9d\xa8\x5b\x67\xb3\x5a\x84\x5e\xe3\xe5\x4f\x35\x2e\x5a\x09\x3f\x03\x20\xed\xc3\xf6\xf5\x2d\xb9\x75\x47\x8b\x3c\xc3\xc2\xc6\x2c\x9a\x94\x6a\xc9\x63\xb7\x19\x0c\xcc\x2d\xc3\xcc\x73\xb0\x86\x32\xa4\x71\x68\x57\x58\x28\x9a\xd5\x05\x16\xae\x96\x42\x34\x75\x67\x70\x4f\x1a\xe2\x9d\x91\x8c\xb3\x3c\xf5\x0a\xf7\x71\xf3\xbd\x4d\x2f\x7c\x45\x04\xe5\x26\x96\x9f\xf6\xe4\x24\xda\x19\xb4\x8f\xd0\x51\xdb\x3b\xc2\x17\x8e\x37\xf9\x43\x9f\x62\x04\xb3\xd6\x77\x2f\xfc\xe9\x92\x71\x41\xf2\x57\x8d\x68\x68\x4e\xf3\x14\x7d\xbb\x76\xb6\xb8\xa1\x53\x46\x95\xcb\x98\x92\x44\x1d\x3b\xbf\x8b\x3d\x50\x76\xbb\x1a\x16\xb1\xe0\x82\xdc\x12\x81\x8e\xf2\x21\x32\x82\xfc\xab\x5b\x9a\xa9\x57\x53\xf4\x7f\x89\xe0\x40\x88\x8c\x2c\xb1\xa2\xb7\x5e\xeb\xf0\x66\x10\x41\xb0\x89\x43\x19\x80\xf8\x35\x3a\x02\x80\x88\x96\x25\xc9\x29\x56\xa4\x58\xbf\x32\xb6\x11\x82\xe4\x5a\x2a\x52\xc6\x08\x26\x66\x9a\x32\xb1\xaa\xf0\xdc\x6f\xbf\xe9\x79\x6a\x4c\xb6\xe6\x67\x97\x8d\xd6\x60\xcf\xc4\xba\x6c\x90\x86\x95\xd3\x11\xee\xda\x7b\x55\x08\xa3\x68\x6c\xa9\x04\xa7\xf7\x0f\x32\x57\x47\x6c\xe8\x6f\x9a\x26\x31\x12\x04\x0a\xa7\xd8\x73\xb5\xe3\xe9\x33\x71\xcc\x6f\x79\xcd\xba\xad\xd9\x2d\x04\xfd\x60\x8d\x3d\x9f\x83\x97\xda\xf9\xd2\x81\x9f\xe9\xd1\x94\xaf\xe0\xd7\x03\x8b\x3a\x46\x60\x46\x07\xc5\x47\x33\x28\xf3\x94\x8d\xc4\xea\xd3\xbf\x86\xa7\xbb\x87\x3c\x68\x98\x53\x6f\x1e\xcb\x83\xf2\xa4\x3d\xe8\x44\xea\x86\xb4\x0e\x1b\x98\xd6\x0e\x0e\xc3\xca\x32\x11\xa0\x4b\xc0\xdb\x00\x1d\x36\x24\x0b\x33\x20\xb9\x8d\x26\x05\x1e\x62\x81\xa2\xc3\xd3\xc3\x07\x89\x03\xb3\x38\xc1\x2b\xbc\x0c\xaa\xb5\x44\xd7\xb8\xf9\x1a\xca\x89\x22\xa2\x84\x22\x0e\x2b\x7e\x67\xbe\x37\x02\xb7\xb2\x4f\x0d\xba\xe2\x7d\x05\x8d\x15\x97\x20\x4f\xdb\x69\xd3\x70\xbe\xc1\xd1\x7f\x87\xd7\x08\x0b\x5e\xb3\xdc\xe8\x9c\x11\x56\x0b\x8c\xfb\xed\xc6\x64\xdf\x71\x06\xdc\xa7\x96\x26\xc9\x39\x94\x1b\x73\xa2\x86\xee\xfc\x94\xa1\xd7\xd3\xd7\x5f\x3f\x08\xe5\x23\xf2\xee\x61\x6e\x1b\xf6\x6c\x17\x35\xe2\x4e\xde\x83\xe6\x22\x08\xce\xdf\xb3\x22\x55\x2f\x7e\x6b\x08\x11\x5e\x9b\xc0\x65\x98\x2e\xc0\xeb\x70\x6c\x3e\xba\x13\x54\x91\x24\x69\x7d\x04\x45\x87\x10\x17\xa8\x66\xfe\xda\xf0\xaa\x9d\x7d\x0d\x8f\xc4\x96\x37\x1c\x19\x25\xeb\xf9\x03\x4e\xae\x39\xa2\x86\x38\x9b\x83\xeb\x49\xb3\xc7\x9b\x60\x86\x7d\xb7\xe3\x10\xb7\x13\xcc\xd1\x91\x79\x52\xeb\xb8\x9c\xab\x57\x0f\x0b\x97\x32\xcb\xbd\xbc\xaf\x52\x75\xee\x4b\x9b\x43\x8f\xaa\x94\xb5\xc7\x0d\xf3\x1a\x2b\x03\x6b\xff\x96\xac\xf0\x2d\x91\x48\xd2\x92\x16\x58\x0c\xe6\x7e\x29\x8e\x66\x66\x3d\x68\x5e\xab\xee\x0a\x1c\xdd\xd5\x1e\x22\xd7\x4d\x57\x37\x20\xa8\xf6\x10\xce\x3e\xf8\xa1\x66\xb3\x86\xe7\xd9\xb1\x8d\x6e\xee\x7a\x2f\x80\x87\xb9\xb5\xe8\x19\x94\xb5\xaa\x71\x31\xb8\x7a\x72\x9f\x15\xb5\xa4\xb7\xbb\x9e\x6f\x9b\x37\x35\x52\xd5\xd8\xd4\x32\x2a\x9e\xcf\x2a\x92\x3d\xb6\x8e\xd1\xbe\xf1\x69\x06\x99\x3b\x22\x82\xe8\x6e\x63\x02\x02\xcb\x4d\x1f\xce\xe6\x04\xe1\x2c\x23\x52\xba\xc8\xe6\x75\x18\x95\xed\x57\xf3\xfc\xca\xb0\xe0\x3b\x79\x59\x60\xa9\x68\xf6\x6d\xc1\xb3\x9b\x99\xe2\x22\xb9\xea\xc9\xd9\x8f\xb3\xad\x77\x37\x0a\xd7\x9c\xfd\x38\x43\x17\x54\xde\x0c\x90\x5a\x50\x02\xcb\x44\x22\x84\xa6\x35\x8c\x6e\xea\x39\x29\x88\x3a\x3c\x94\x46\x3a\x97\x38\x5b\x51\x06\x86\x99\xe1\x5b\x1e\x73\x69\x6d\xae\x0c\x99\xc6\xff\xd8\x50\x04\x9b\x38\x79\x62\xe9\xf9\x2b\x7c\x27\x89\x59\xf2\x5c\x2f\x59\x7f\x4d\x62\x35\x3a\xf6\xe0\xb3\x33\x3f\x7f\x75\xf1\x40\xaf\xdc\x42\x7e\xd4\x33\x4a\x77\xd6\x1c\xbe\xa1\x05\x31\xb7\x38\x58\x8c\x8b\x17\xb5\xa7\x03\x76\x6c\xcd\x6b\x74\x87\x07\x2c\xe6\x66\x28\x6e\xf8\xf0\x14\x7d\xa4\xd5\x29\xba\x64\xb2\x16\xa4\xb1\x6a\x2d\x36\x7e\x8
8\xca\x26\x3b\x35\x02\xd9\x5e\x36\x81\x3a\xcc\x35\x4c\xf3\x55\x7b\xf7\x44\x97\xf7\xb8\xac\x0a\x22\x4f\xd1\x01\xb9\x57\xdf\x1c\x1c\xa3\x83\xfb\x85\x3c\x88\xa5\xe6\x1c\x30\xb5\x90\x07\x53\x74\x55\xfa\xb0\x18\x28\xb9\x26\x88\x0b\x60\x34\xe0\xb4\x0a\x12\xe8\x10\x31\xc7\xd2\x23\x90\x5f\xe4\x17\x4d\x60\xad\xd6\x8a\x73\x8e\xee\x4c\x65\x21\x2d\xc4\x88\x10\x5c\xf8\x2c\x97\x00\xfd\x83\xf1\xca\x66\x64\xbc\xac\x04\x2f\xa9\x17\x60\xf6\x50\xee\x25\x1e\x17\x4c\x4e\xc3\x97\x01\xb4\x45\xa7\xa6\x04\xa7\x7d\x11\xb5\x95\x88\xdd\xa8\xf4\x6a\xe1\x82\xac\xcc\x45\xde\x5a\x6c\xc0\x6e\x60\x1f\xd2\x94\x17\xbd\x47\x21\xab\x73\x87\x94\xf8\xc6\x67\x16\xa3\x93\x9c\xdc\x9e\xc8\x1c\xbf\x3e\x86\x29\x4a\x1b\x59\x1c\xf7\x7e\x36\xab\xc5\x12\x1d\xbc\x3e\x98\xa2\x99\xd3\x6a\x8e\xc3\xd5\x37\xcf\xf5\x05\xfd\x36\xc3\x4d\x06\x1c\x52\x5f\x1f\xa0\x23\x2e\x60\x56\x19\x66\xa8\x20\xf8\xd6\x3a\x60\x0c\xaf\x59\x1b\x5b\xc6\xab\xa4\xdc\xf1\x94\x94\xd9\xc0\xce\xf3\xaf\xbf\x19\x14\x18\xb1\x5b\x03\xda\xae\x06\x60\xf1\x7a\xa0\xaf\x0b\x07\xa0\xde\x73\x57\x7e\x57\x2b\x1c\x50\x90\xd1\xc2\x8d\x21\xde\x2d\x9f\xb2\x2d\x0b\x89\x01\xbf\x45\x3c\x11\x88\x01\x69\x1d\xc0\xad\xe3\xe0\x89\xe5\x14\x1a\x51\x71\xc5\x89\xa2\x31\xd8\xff\xc4\xe8\x4f\x35\x41\x57\x17\xbe\xd6\x0a\x11\x92\x4a\xa5\xf9\x50\x4e\xe5\x4d\x5a\x3d\x00\xb8\xff\x6a\x85\xe2\xe8\xac\xc4\x7f\xe7\x0c\x5d\x7e\x3b\xb3\xd3\x79\xf5\xc5\x10\x36\xc8\xce\xf0\xdf\x6b\x41\xb4\xfa\x93\x1a\xa8\xe4\x9e\xdf\xd4\xa2\xf4\xe7\xe8\x02\x2b\x0c\xca\x94\xe1\x3e\x43\x36\x44\xd6\x48\x44\x4d\xe1\x73\xca\x72\xcb\xb2\x02\x5d\xe8\xf1\xd5\x16\xbd\xb7\xef\xfa\x74\xd3\xe6\x91\x4f\x1f\xae\x1e\xa8\xd8\x64\x20\x7f\x96\x6f\x79\x3e\x4a\xbb\xf9\x93\x46\xd0\xb9\x79\x17\x95\xfa\x65\xf4\x8e\x33\x72\x0c\xac\x00\x69\x5e\x60\xfe\x1b\x21\xcb\x1f\x05\x55\x24\x89\x0d\x46\x85\x9f\xc3\xd8\x88\x55\x7c\x0c\xac\x32\x20\xa8\x34\x99\xc0\xa9\xb2\x62\x70\x5e\xf0\xb9\x2b\xe4\xbd\xaf\x39\x7e\xfa\x70\x35\x72\x8a\x9f\x3e\x5c\x3d\xcd\xf4\x46\x2b\xb9\x9b\x3a\x6e\xa3\x03\x34\x29\x65\x8d\x7e\x14\x53\x20\x34\x04\xaf\xb9\x46\x75\xd3\xa9\xd3\x4a\x63\xea\x28\xe8\xac\x8f\xa3\x95\x26\xe2\x75\xb3\xea\x71\x04\xab\x87\x97\xf7\x95\x09\x94\xb5\x6e\xa5\xd9\x0a\x43\x95\x09\x97\x81\x6c\x36\x5e\x53\x42\x2c\x72\xaa\x22\xc2\x11\x88\xbe\x65\x03\x1f\x43\x17\xc4\x84\x15\xe4\xa7\x36\x10\xa8\x81\xa7\x5f\x88\x80\xdc\x02\xf7\x16\xa2\xcc\xf3\x53\xc3\xb3\x91\x09\x3a\xcf\x03\x6a\x3d\x4a\xa8\x15\x44\x99\x7f\x11\xdf\x62\x5a\xe0\x39\x2d\xa8\x5a\x6b\x0d\xe3\xd5\xb4\x15\x7f\x2f\x01\x19\x7b\x61\x19\x3b\xa8\x43\x5b\x66\x4e\x74\xa4\xa1\x9c\x80\x01\xf5\xd5\x34\x55\x13\x5a\x11\x61\x13\xc0\x8d\x32\x15\x2a\x51\x7a\xcd\x40\xeb\x1b\x5a\x52\x0a\x29\x0e\x2b\x21\xb0\x41\xfa\xcc\x8e\x11\xaa\xfa\xf9\x4e\xa1\x0a\x5f\xcc\x6c\x4d\xd4\x9f\x8b\x5c\x35\x79\xa9\x11\xc9\x0a\x24\x36\xf8\x4c\x9a\x6c\xfd\x67\xa3\x2f\xe4\x13\x7f\x47\x0a\x62\x3d\x3d\x66\x05\xb1\xab\xc7\x02\x25\xc4\x5d\xfc\x9e\xa1\xb8\x59\x82\xa8\x43\xc8\x96\xf7\x33\x92\x5d\xd3\xda\xf7\x91\xea\x1b\xa9\x85\x78\x1c\x55\x8c\x49\x77\x5d\x41\x98\xd4\x10\x29\x25\xce\x20\x23\xd5\x6a\x91\x1a\xf3\x71\x4e\xaa\xd5\x9b\x59\xdb\x38\xab\x3f\x43\x6f\x66\xf6\xd0\x05\xe7\x71\xd0\x16\x88\x95\x59\xb7\x34\xc6\xdc\x43\x89\x0a\xba\x20\x83\x81\x22\x7b\x3b\xab\x25\x67\x54\x71\xd1\x2f\xe3\xd2\x4e\xa1\x03\x33\x46\x02\x37\x95\x9d\xde\xda\xb7\x4d\x50\x6d\xc6\x8b\x82\x64\xca\xd6\x3b\xd4\x28\x8d\x10\xa3\xfb\xf1\xae\x4b\x16\xb1\x56\x0d\xdf\x91\xc7\x5c\xa8\x4e\xcc\x56\x9f\x7c\xb8\x3c\xbb\x78\x7b\x39\x2d\xf3\xaf\x56\xfc\x6e\xa2\xf8\xa4\x96\x64\x42\x23\x85\xa5\x9e\x36\x48\xd9\x8c\x2a\x5a\x41\xb1\x8d\xdc\xf7\x2e\x6d\x1c\x7d\x92\xa6\x58\x0b\x18\xc2\x9c\x8b\x92\x73\x75\x8c\x04\x56\xab\x
a8\x2a\xa2\x56\xd8\xda\xe0\xea\xa2\x30\x04\xae\x04\x21\xc7\xa1\x59\xa0\x37\xc3\x6d\xd4\x12\x77\x31\x9c\x34\xcb\x8c\x30\xf5\xc8\x2a\x3d\xdf\xde\x3f\x53\xef\xb4\xaa\x3e\x1e\x59\xa6\x4a\x8f\x61\x15\x65\x08\xd3\x33\xff\xbe\x8b\x52\x02\xf7\xa8\xe2\x09\x25\x98\x20\x6f\x60\xc1\x85\xa6\x4a\xd1\xa6\x21\xa2\x32\x58\xfe\x49\x2d\x89\x98\x5a\x09\xf5\x36\x6e\x41\x7e\x2a\xc4\x26\x16\x91\x1b\x59\x07\x74\x13\xad\x1f\xc8\xc2\xa4\x5a\xb8\x3a\xf5\xd1\x70\x4e\x3d\x70\xad\x56\x84\x29\xd7\x1a\xc4\x22\xaf\x13\xcf\x36\xb3\xe3\x19\x21\x36\xa1\xe4\x5d\x7a\xa9\xba\x97\x42\x6e\x3d\x63\xaf\x85\xdc\xf4\x11\xdd\x59\x1a\xf9\x1c\x5a\x81\x73\x0e\x81\x46\xa6\xb6\xa9\x27\xd3\x18\x5d\x4a\x84\xf3\x92\xb2\xa7\xe5\xab\xc3\x8a\x23\x65\xf9\x10\x46\x36\x4a\xba\xc2\xd3\x6d\xcd\xd1\x40\x70\x5e\x11\xef\x63\x1e\x76\x23\x3b\x71\xce\x99\xf3\x43\xb7\xbd\xd0\x9d\x44\xbc\x89\xa2\x72\x2d\x7f\x2a\x26\xe6\xf7\x27\x55\xde\xe0\xe8\x9f\xc9\x71\xfc\x94\x46\x35\xeb\x66\x8b\x99\xd4\x5a\xee\xe0\x64\xd3\x5a\xec\xec\xb4\x0d\x6f\x8f\x44\x1f\xe8\x45\xe9\x7b\x74\xcc\xa6\xea\x79\x3b\xeb\x23\xd0\xab\x4d\xba\x74\x5f\x53\x69\xd3\x34\x7d\xb3\x16\x83\x78\xad\xcf\x0a\x0b\x5c\x12\x45\x84\x09\x27\xb5\xa1\xab\xcc\xe6\x56\xbd\xaf\x08\x9b\x29\x9c\xdd\xec\xab\xbc\xf7\x8b\x96\xf0\xbc\xb4\x84\x5d\x7c\xb1\x2e\xb0\x2d\xf7\x14\x63\xcb\xf0\xad\xc3\xa8\x81\xde\x58\x7a\x37\xcc\xa1\xfa\xc2\xfc\xcd\x57\xc1\x4c\xb5\x28\xf9\x1a\x88\x6d\xd5\xa0\xa9\x8a\x69\x4c\x45\xbc\x2e\x86\xb4\x02\x5f\x4c\x16\x92\x7e\x22\x11\x20\x7b\xac\xf0\x67\xb5\xb7\xb1\x5e\xce\x86\xe3\x94\x3c\x27\x68\x4e\x0d\xd3\xa9\x25\xd1\x7a\x4d\x66\xb3\x7c\x40\x18\x47\x36\x7d\xee\x13\x97\x42\x31\x6e\x52\x5c\xe6\x44\xdd\x11\xc2\xd0\xd7\x60\xad\xfc\xfa\xdf\xfe\xed\xdf\x92\xc2\x57\x91\x09\xba\xf9\xfa\xb7\xdf\x7c\x33\x45\x17\x54\x40\xa5\x07\x0a\x59\x48\x3e\x18\xb8\x72\x81\xab\x8c\x2b\x5f\x7f\x25\x3e\x55\xd8\x1d\x2b\x5c\x6c\xa0\x7d\x49\x97\x2b\x65\xfa\x39\xc2\xae\x17\x34\x53\xa6\xfe\x2c\x4f\xb0\xd4\x98\x4a\x81\xd2\x50\x09\xb6\x29\xac\x36\x78\x0a\x70\x7b\x8c\x0a\x7a\x43\xd0\x42\x7e\x27\x78\x5d\x1d\x47\x94\x4b\x64\xb3\xd1\x04\x91\xfa\xea\x68\xfb\x9a\x19\x4e\xd3\xec\x94\x24\xea\xc9\x23\x6c\x12\xec\x6a\x2d\x12\xbb\x6a\x29\x3d\xc7\xbe\x7e\xe7\xc4\x90\x46\x85\xa9\x88\xb7\xaa\xd3\x03\x42\x1c\x5a\x5d\x79\xfc\x3d\x3d\x0f\x4e\xaf\xcb\x50\xab\x04\xff\x5b\x0a\x31\x50\x66\xdd\x33\x4e\xfb\x97\x56\xfb\xb4\x89\xfb\x60\xb5\xb7\x77\x26\x5b\x8f\x25\xc6\xfe\x38\x53\xb6\x7a\x8c\xcf\xf2\x82\xb0\x9f\x00\x0b\x50\xce\x84\x4a\x7d\xb8\x6e\xc8\x3a\x76\x08\xb6\xd6\xd4\xcc\x3a\x6c\xef\xac\x56\xd2\x94\x09\xa8\x99\x81\x1d\x01\xdb\x94\x96\xb6\x3d\xd9\x2c\xdf\xb3\x9d\xfe\x6c\xca\x6f\xf3\x0b\x10\x6c\x1f\xbf\x1f\x6a\x60\x16\xd2\x56\x29\xd6\x56\xf8\x99\x24\xaa\xae\x62\x7d\xff\xf4\x80\x78\x44\xbd\x2a\x22\xa5\xcd\x7d\x2c\xb1\xb8\xd1\x97\x2f\xcb\xbf\xa6\x90\x21\x21\x7d\xae\xa6\x49\xca\xbd\x8d\x9a\x6e\x6c\x6b\xe1\x30\x6f\x49\x4f\xf0\x70\x3a\x3d\x34\xac\x85\x0b\x53\x35\xdb\x70\x02\xfd\xf9\x13\xd6\xd0\x68\x67\xb8\xe0\x2a\x68\x1c\x6c\x5b\xaf\xe1\x56\x76\x04\xb6\x98\x8d\xa9\xc7\xc9\x4a\x5d\x6a\xbf\x83\x94\x8e\x07\xfa\x99\x2a\xd6\xce\x69\x8c\x1e\x99\xd8\x1a\xa1\xbf\x6f\x9d\x3d\x4e\xf1\x5e\x06\xa3\x6a\xec\x23\x13\x2d\x34\x76\x5e\x11\x51\xbc\xd5\x03\xba\x6f\x68\xd6\x75\xec\xcf\xc1\x90\x04\xbe\x5a\x68\xba\x4f\x02\xba\xc1\xb9\xec\xf9\x0d\x74\x0e\xcf\xa2\x20\x73\x2d\x09\x66\x8a\xd0\x6d\x89\xd5\x24\xa8\x89\xa2\x37\x10\xae\x49\x60\x77\x16\xc0\x66\x8c\xeb\x0d\x93\x2e\x8c\xcd\x88\xbb\xa7\xcc\xd8\x3a\x07\x8e\x4b\x5a\x56\xb7\x68\xd0\x06\x65\x19\x62\xc5\xa8\xec\xda\xfc\x81\x9a\xa2\xb7\x96\x97\x6a\xca\x63\x08\xcf\x25\x2f\x6a\x65\xc0\xfb\x2f\xd3\xf0\x1d\x30\x63\x98\x9c\x2d\xbd\x63\x38\
xb0\xff\x9d\x86\x35\x27\x4f\xd5\xf2\xcf\x38\x27\xb7\xaf\xa4\x1d\xfc\x97\x02\xdb\xbb\xe1\x6d\xef\x05\xec\x9b\x7e\x24\xb1\x45\xef\xbb\xd4\x7d\x26\x69\xea\x55\x73\x76\x85\x8e\x9a\xd6\x62\x36\xe8\x03\x5d\x31\x45\xc4\x02\x67\xe4\x55\x70\x05\x1d\x32\x31\xd9\xf7\x5c\x6e\xd4\x0a\xb3\xbc\xf0\xdd\x33\xc8\xbd\x22\x82\xe1\x02\x7e\x2d\x17\x14\x0a\x40\x9c\x15\xd5\x6a\x28\x89\x78\x41\xb0\xaa\xc5\x90\xf1\x6e\x7f\xf1\xbf\x30\xa5\x87\xde\x7a\x01\xc8\x98\xe8\x23\x83\x08\xab\x9a\xb3\xe0\x14\x06\x68\x4a\x10\x36\x06\xd7\x32\xbc\xe5\x4f\x35\x15\xc2\x6d\x0d\xe4\xd7\x9a\xd7\xc2\xba\x4a\x6c\x81\xf3\xe8\xb5\x41\xe8\x1b\xae\x99\x14\x96\x48\x90\xa5\x56\xe2\x05\x68\xfb\x26\x15\xb4\xa8\xf5\x07\x7b\x89\xd8\xdc\x63\x24\x6c\x10\xa9\x3a\x32\x7f\x6a\x61\xef\x0a\xfc\x96\xe6\x4e\xb7\x00\x17\x69\xd3\x15\xba\xc2\xd2\x98\xa2\x12\x2e\xcb\x41\x3f\x9a\x60\x37\x4d\x8a\x30\xe8\x27\xbe\x14\x40\x98\xb5\x13\x01\x1c\x26\x9f\x71\xa8\xaa\x14\xa9\x10\x98\x2a\x31\x78\x4e\xae\xeb\x79\x41\xe5\x6a\xb6\x83\xad\xf8\x5d\xc7\xeb\x26\x82\x67\xa4\xf7\xba\xcf\xba\x8c\x24\x61\x92\x82\x76\xa0\x25\x92\xd6\x47\xa8\xd6\x43\x39\x6c\x49\xdc\x6a\xb3\x79\xa6\x38\x64\x86\x15\xc4\x56\xa6\xd1\x5f\x05\x6b\xf8\x9c\x92\x2a\x65\xea\x6d\xe5\xe4\x13\xab\xc2\xb7\x50\x86\x8b\x42\x6e\x96\x4b\x70\x92\x25\x46\x8c\x1a\x28\xdc\x01\x5d\x01\x16\x44\x35\x59\xfa\x5e\x72\x1b\x85\xb6\x9b\x86\x6a\x31\x83\x50\x0b\xa1\x12\x95\xdc\xa4\x34\x32\xc4\x99\x03\x01\xc5\xc8\xdc\x2f\x35\x09\xe3\xb1\x19\x0b\x62\x8f\xc5\x9e\xda\x8d\xbf\x98\xee\x9f\x97\xe9\x7e\x07\xb7\x5c\xd3\xc8\x13\x07\x25\x30\x5a\xc5\xd2\x13\xc5\x90\x13\x66\x0f\xf1\xee\x8d\xcb\x17\x3b\x53\x4a\xd0\x79\xad\x46\x95\x8a\xfd\xbc\xf1\x2a\x68\x43\x44\x5a\x76\x33\xb1\x17\xd6\x2c\xa0\xed\xa8\x00\xc1\x2a\x38\x5a\xdb\x2c\xac\x91\xef\x20\xda\xcd\x87\x83\x85\x36\x60\xd2\x3c\xab\x7d\xed\x7b\xd8\x82\xc6\x57\x1d\x2b\xeb\x8c\xc6\x1c\xe2\x71\x55\x38\x43\xe0\x83\xf4\x98\xf3\x3b\x76\x87\x45\x7e\x76\x3d\x90\xd4\xd3\x56\xb1\x9a\x37\x42\x57\x8a\x03\x04\xcd\xa0\xf1\x9c\xd7\x2a\x42\x8c\xbe\xb2\x82\x2d\xd6\xf1\xe2\x59\x79\xf1\xac\xbc\x78\x56\x42\xcf\x8a\x7e\xbe\x5d\x82\xbf\x75\xc8\x5c\x9d\x1c\x3a\x50\x08\x19\x3d\xaa\xc9\x3a\x60\x05\x86\x5f\x6f\xa6\xda\x04\x9a\x65\x5c\x78\x72\x7b\x6a\x9a\xdd\x0f\xb4\x55\xd7\x4f\x01\x84\xf8\x97\x30\x7f\xef\xd9\xb4\xbd\x48\x6a\x6b\x6f\x46\x5f\x64\x7f\xd3\x21\xb9\xe5\x36\xab\x78\x74\x39\x66\x80\x0e\x81\x19\xe3\x46\x7a\xc9\x63\xdb\x8e\xe2\xd8\xde\x51\x59\xde\x74\x57\xd6\x9c\x22\xcd\x94\x9c\xd8\x57\xde\x8c\x11\x5b\x85\x46\x6c\x17\x82\x2d\x03\xac\xf4\x56\xe6\x6b\x8f\x31\x7b\xa7\x47\xa3\x44\xa6\x3d\xbf\xa9\xde\x98\x77\x7d\x0b\xe7\x6c\x45\x4a\x0c\xff\x7d\x33\x62\xd2\x66\x50\x89\xb4\xd2\xa6\x88\x29\x7e\x41\x44\x29\x11\x5f\x6c\x34\x69\xbf\x7d\x7d\x90\x66\xb6\x1f\xe9\x0b\x41\x8e\x96\x87\x8b\x9d\xb5\xc7\x76\xe9\x33\x6f\x9b\xd6\x74\xdc\xf4\x4f\x8c\x86\xa2\x04\xb3\x6e\x39\x4b\x81\x3f\x1a\x1c\x3f\xca\xb2\x5f\x5c\x40\x29\x30\x5f\x5c\x40\xfd\xe3\x09\x5c\x40\x81\xa8\x80\x43\x46\xa5\x53\x8d\x02\xb7\x50\x12\x6e\x42\x43\xaa\xf3\x1f\xcd\x89\xd3\x72\xa7\x4d\x71\x50\x4d\x8f\xd6\x2b\x94\x26\x82\x44\xdb\x2f\x7f\x38\x9d\x1e\x1e\x3a\x7f\x92\x25\xf0\x5a\x2d\x26\xbf\x43\x84\x65\x3c\x4f\x27\x3c\x3d\x4f\x21\x15\x28\x35\x8d\x75\x22\x74\x87\x95\x6e\xce\xe0\x64\x4a\x02\x6b\x62\x04\x60\x8e\xe9\x24\x91\xc8\x51\x5c\x19\x94\x37\x3b\x2b\x06\x8d\x3a\xe0\xcb\xaa\xd9\x85\xfb\x72\x74\x23\xf4\x82\xa6\x11\x9d\x69\xc7\x08\x47\xca\x35\xc5\x43\x47\xe6\xc3\x69\x56\xd5\x29\xe6\x13\xd7\xd3\x71\x5a\x92\x92\x8b\xf5\xb1\x07\xa4\x01\xb4\x20\xdb\x27\x86\x0a\x29\x36\x43\xdf\x5e\xb2\x5a\x08\xc2\xa0\x75\xdd\xf3\xd1\x3c\x12\x6b\xda\xa0\x1d\x14\x0f\xbf\x97\xf1\x44\xde\x66\x6c\xa4\x48
\x78\xd7\x14\xd8\xfb\x9a\xee\xa2\x0b\x5f\x9b\x4a\xa6\xed\x2a\x0a\x5c\x7c\xf0\x36\x61\xb7\xe8\x16\x0b\x99\xb2\x03\x68\x17\x5d\x23\xa7\xb7\x54\xc6\x3a\x33\x05\x8f\x77\x1b\xd1\xa0\xaa\x6f\xad\xaa\x5a\x59\x3e\x8c\xf8\x62\x94\xa6\xe1\xca\x0b\xfa\x53\xb2\xa1\x6e\xbd\x8e\xe5\x0a\xb8\xf1\x0c\xbb\x0a\xb6\x47\xa4\xc7\x60\x7b\xa4\x76\x1c\xec\x7e\x6b\x04\x19\xec\xd8\xd4\x73\x73\xb8\xed\xdb\xed\x1c\x35\xe2\xb5\xa9\x63\xe9\x54\xd7\x3d\x1e\x00\xf0\x5a\x5c\xd0\xe4\x0c\xa8\x4b\xfb\x7c\x3b\xd0\x59\x91\xb2\xe2\x02\x8b\x35\xca\xad\x3d\x68\x1d\x53\xb9\x5a\x79\xf3\x41\xe2\xfc\x83\x0b\x6e\xc1\x8a\x72\x2a\x1e\x98\x04\x95\x98\x3d\x4f\x72\x5a\x97\x63\x4c\x7b\x3f\x42\x61\x4a\x5b\xf2\xd2\x79\xe1\x0d\x18\x5f\x59\x18\x67\x43\x75\x4d\x91\xeb\x0a\xe1\x71\x6d\x7a\x36\x85\xd5\xde\x0e\x36\xda\x3a\x68\xbd\x3a\xc1\xae\x65\x1b\x6e\x7b\x50\x66\x5e\x81\xe5\x90\x59\x37\x57\xd2\x69\x3a\xb2\x60\x5e\x69\x4d\xec\x2d\x88\xde\x27\xd8\x5d\x34\x22\xef\x97\xfe\x9d\x40\xc3\xda\x51\x15\x21\xb9\xc2\x45\xd0\x74\xb7\xe0\x19\x2e\xfc\x4e\x86\xe2\x2e\x65\x0f\xfd\x79\x72\xae\x13\xbd\x93\x7a\x5e\x46\xa9\x01\x0b\x5d\x21\x8d\xef\x98\x66\x78\x1e\x31\xc7\x81\xea\x8f\x8c\x9a\xe3\x77\xef\x63\xd0\xb5\xba\x96\x7a\x96\xd0\x41\x27\x78\x26\x02\xd3\x4f\xf2\xce\x55\xbe\x0e\xfb\x0f\xb7\x6f\x88\xfa\x9b\x99\xc3\x6b\xcc\x7d\xec\xaf\xd4\x90\xc7\xe5\xee\x55\xb2\x06\xad\xda\xce\xd0\x6a\x88\x7c\x01\xfe\x4e\xaf\xa1\xc4\x6e\xb3\xae\xc0\xf2\xd6\xd9\x60\xb4\x68\x1f\x0e\x57\x29\xb6\x48\x98\x31\x95\xa8\x66\x36\xe8\x67\x8b\x96\xbb\x49\xb9\x96\x44\x4c\x96\x35\xcd\xc7\x12\xf1\x33\x95\xde\x09\x32\x7b\x8c\xa4\x1e\x21\x9f\x77\x94\xca\x8b\x2c\x51\xb8\xbd\x39\x6f\x4b\xb5\x37\x74\x2e\x08\x3a\x5f\x61\xc6\x48\xb1\x55\x4e\x7a\x60\xaa\xdd\x85\xa6\xb7\xeb\x4c\x6f\x56\x93\x1e\x00\xb9\x9f\x1a\x50\xbf\xc4\xa4\xdc\xe7\x50\xe9\xee\x67\x58\x0d\xb9\xa8\x47\xd5\x41\x6e\xac\x8b\x6f\xce\x91\xc2\x62\x49\x94\x06\x81\x58\x5d\xce\x49\x92\x34\xfe\xd2\x95\x7a\x9f\x47\xee\xf1\x7e\x12\x84\xcd\x06\xfc\xf8\xe3\xbb\x51\x65\x9b\xba\xf6\xf0\x8e\x8b\x22\xbf\xa3\xb9\x89\x67\x94\xe8\x48\x03\x7d\xf5\xfc\xaa\x28\xdd\xdd\xd1\xa1\x9e\x6e\x91\xc5\x5a\x83\x33\x2c\x16\xc1\x6a\x6d\x6e\x2a\x8d\x2b\x12\x47\xf0\xd3\xaf\xd0\x25\x35\x9d\x7a\xf4\x5f\xc6\xb4\xd8\xb4\x23\xe4\x8b\x60\x4b\x12\x7c\xc8\xfa\xe4\x38\x13\x3b\xf4\xf2\x99\xd7\xd6\xc4\xc9\xd5\x0a\x49\x5a\xd6\x85\xc2\x8c\xf0\x5a\x16\xeb\x24\x92\x79\xba\xcd\x58\x14\xe4\xde\x50\x74\xaa\x54\xf5\x2f\xb4\xa5\xeb\x92\x30\x22\x68\xe6\xf6\x26\xc1\xaa\xe5\x62\x95\x21\xe6\x53\x52\xce\x48\x7e\xe2\x45\xad\x69\x7a\x02\xb1\xcb\x24\x43\x73\xac\x25\x6a\x55\xd4\x4b\x3a\xe0\xa3\xf9\xc5\xc6\x23\x37\x71\x93\xb5\x24\xa9\x57\x91\x94\xf4\xaf\x97\x52\xb9\x5e\x81\x08\xaf\x15\x81\x5c\xcf\x49\x45\x98\xe6\x10\x2c\xa0\xfc\xd8\x3d\x08\x10\xb1\xa7\xee\xf2\xaa\xaf\x01\x72\x33\xfa\x38\xe5\xe5\xbd\x12\x58\x33\xb6\x52\x2b\xa7\xce\x77\x46\x17\x5a\xe7\xdf\x57\xa9\x87\x47\x0a\x35\x43\x2f\x5a\xc1\x97\x2c\x63\xf6\x98\x81\xe0\x86\x8b\xdb\x43\xe2\x42\xb5\x7b\x83\xae\x23\x30\xed\x0c\xc3\xd4\xe4\x8d\xd0\xec\xce\xe0\xeb\x28\xff\xec\x0d\xcd\x96\x1d\x71\xa1\x66\x45\x49\x4c\x21\x8d\x36\x5e\xe2\xb4\xb7\xc6\x73\x8e\xd3\x5e\x14\x3c\xbb\x49\xae\x3a\xf6\xc6\x3c\xbd\x61\x9b\xb0\x1f\x6e\x94\x1d\x1b\xd2\x9e\xfa\x6d\x11\xf6\x4c\x05\xa2\x0b\x82\x85\xcc\x4f\x0c\x80\xd4\x47\x44\x70\x4d\xe9\xa6\xcc\xf5\x9c\xe8\x23\x2f\x6a\x36\x58\x61\x68\x9f\xe1\xb5\x58\x61\x39\xba\x9e\xf2\x66\x53\x03\x49\x94\x89\x00\x87\x6a\xab\x25\x51\x18\xaa\xb3\x4f\xfe\x90\x70\x50\x1c\xb2\x1c\x9c\x05\x17\x7e\x6f\x9a\x46\x81\x19\x67\x92\xe6\x44\x44\x63\x60\x31\xec\x82\x20\x59\xa4\xb1\x68\x7a\x4f\x05\x33\xaf\x4f\x9f\x46\x15\xf3\xd1\x8f\x6f\x60\xc8\xd2\x08\x18\x40\xa
1\xe9\x4a\x73\x83\x8a\xb1\xdc\x45\x40\xaf\x16\xda\x43\x97\xb6\xcc\xc8\xb5\x6f\xf5\x32\xa2\x11\xca\xe1\x77\xe7\x97\xed\x17\xdb\xc7\xea\xbb\xf3\x4b\x74\x91\xd8\x38\x66\x74\x5b\xb9\x96\xb9\x6f\xf8\xa4\xee\xab\xad\xdc\x32\x23\x4d\x4b\x9c\x9c\xca\x9b\x27\x28\x0d\x58\xe5\x7b\x28\x0e\xff\xd2\x51\xee\x67\xde\x51\x6e\x8b\xf2\x7e\x69\x06\xd4\x97\x76\x72\x3f\xb3\x76\x72\x4f\x4c\xef\x4f\x6d\x13\x37\x6c\x77\x87\xee\x69\xa1\xe1\xe8\xfa\xa2\xf1\x73\x51\xa6\xc5\xe1\x14\xca\xd0\xc7\xc9\xb1\x55\xec\xcf\xb5\x61\x82\xf7\xbf\x04\x62\x1f\xc5\x56\x30\xea\x8e\x1f\x77\x4f\xb7\x2c\x00\x1d\x1d\xc2\xbf\x08\xe2\x86\xad\x0c\x4b\xaa\x3e\x90\x8a\x27\x2b\x5b\xe6\xf1\x0d\xbb\x2f\x55\xfa\x03\x2e\x29\x84\x09\x61\x05\x75\xa8\x84\xa2\x59\x5d\xe0\xa1\xa3\x20\x88\xb1\xfa\x4e\xd1\xc5\xe5\xf5\x87\xcb\xf3\xb3\x8f\x97\x17\xa7\xc8\xfd\x06\x0d\x35\xe7\x29\xfa\xc8\x1b\x33\xf1\x90\xa7\x36\xe8\xa6\x0c\x01\xb8\xcd\xfc\x8e\x2d\xf3\xc4\xac\x89\x7f\x80\xca\x69\x98\xa1\x2b\x46\x95\x0f\x74\x8c\xa9\x88\x59\xc1\x99\x0d\x0b\xd4\x70\xad\xa5\x7a\x49\xd5\xb1\x51\x08\x6d\x9f\x9f\x15\xf1\xbf\x33\x00\xd1\xd7\x6e\xbb\x36\x61\x53\x7e\xfa\x03\x76\x82\xbd\x69\x76\xcd\xb6\x3d\xf4\xde\xe6\x22\x97\xc6\xf4\x7a\x33\x0e\xab\x26\xc0\xcc\x48\x2b\x1f\x1d\xee\x42\xbd\xa3\xd2\x63\xb3\x24\x1b\xd2\x32\xf4\x70\x7a\xe8\x14\xb9\x62\x2b\x4f\xc1\xfd\x64\x04\xb0\xcb\x61\xd0\xaf\xb6\xa9\x7c\x8a\xd0\x7b\xb5\x22\xe2\x8e\x4a\x72\xac\xf5\xb0\x26\x21\x22\x6a\x59\xf2\xd3\x00\xf0\x61\x40\xfb\xc6\x49\xb2\x9f\xca\x7a\x9e\x3e\x61\xb5\xb2\x90\x6e\x09\x33\x08\xdd\x0f\x73\x75\x93\x1a\xb1\xbf\x1f\x9a\x95\x7c\xfa\xf0\xc3\x7e\xa6\x61\xce\xff\x88\x49\x9c\xf3\xb2\xa4\x0a\xad\xb0\x5c\xb9\x94\xf7\x20\x4a\xc9\x73\xa0\x07\x5f\x5f\x4d\x31\x96\xe4\x8e\x45\x87\xdf\xb9\x17\x36\xae\xab\xfe\xe3\xb0\x6f\xd1\xc0\xec\xe0\xe6\xb1\xdd\xb7\x28\x16\x7f\xd9\x57\x9c\xdf\xaf\xe3\x49\x6b\xcf\x13\x96\x9b\x12\xd7\x43\x97\xd0\xa1\xec\x93\x34\x26\xe5\x7f\x66\x54\x73\x42\xf7\xd2\xbb\xa0\xf4\xa7\x83\x64\x54\xad\x84\x5c\xa4\x9c\x28\x4c\x0b\x19\xec\xaf\xe2\x15\x2f\xf8\xb2\x3b\x80\x72\xc4\xf6\x7c\x65\xd2\x6a\x26\x78\xa2\xf7\x7d\x3f\xf7\xa0\x71\xfd\x8d\x5c\xb6\x90\x46\x4c\xb3\x3e\x7f\x0b\x80\xe4\x9c\x7d\xb5\x38\x79\x12\x04\xec\x5d\x95\xdc\xc4\x4a\x5c\xf9\x9e\x37\x0d\xa2\x80\xa7\x37\x55\x3c\x2a\x22\x4a\x2a\x35\xd3\x6a\xeb\x98\x11\x98\xfd\x1a\xe8\x63\xa2\x7a\x58\xf9\xd4\xbc\x6b\x38\x11\x74\xbb\x09\x2f\x10\x5b\x8b\x65\x56\x82\x4c\xc8\x3d\x95\x60\x79\x81\x74\x33\x2e\x92\x24\x7c\xd8\xa3\xd1\x19\xf5\x9c\x09\xd0\xbc\x5f\xac\x83\x58\xbe\x76\x6e\xd4\x90\xc4\x70\x76\x55\x08\x88\xc0\x45\xb1\x36\x05\xd4\xa1\xdc\x87\x31\x75\xe0\x25\x4c\x9e\x0b\xeb\xcb\xa8\x04\xbd\xa5\x05\x59\x0e\x5b\x11\x57\x94\x2d\x65\x53\x9a\x04\x17\x05\xbf\x23\x36\x01\x96\x6c\xad\x45\x6f\xb6\x54\x69\xf1\xb5\x40\xaf\xef\xde\x7f\x44\x8c\x68\x80\x2b\x2a\x1f\x7c\x59\xd1\x53\x89\x64\x0c\x4e\x26\x13\xb0\x12\x1d\xfd\x4d\xeb\xd3\x79\xf1\x0a\xfd\x48\xec\x0c\xf4\x15\x43\x9f\xd3\x4c\xa1\xbb\x15\x07\x3b\x40\x2d\xed\xfa\x52\xf6\xb6\x34\x8d\xd6\x31\xcb\xdd\xfb\x27\x1a\x86\x56\x2b\x8d\x60\x6d\x41\x82\x4a\x39\x12\x35\xde\xe1\xa7\xd0\xbd\xf7\x20\xd0\x76\xe2\xd4\xce\x0b\xe0\xd5\xee\xe0\x1c\x38\xcf\x69\xd4\x04\x64\x38\x3e\x46\x72\x5d\x16\x94\xdd\x1c\x23\xaa\x1c\xd3\xd3\x54\x69\xc3\xaf\xd9\x8d\x3b\x37\x82\xe0\x58\x59\xe7\x46\x52\xec\x81\xea\xf6\x22\x07\xd4\x48\x83\xf5\xc7\x75\x05\xb1\x39\xc8\x33\x2a\x1b\x26\x15\x9a\x02\x0e\x62\xb9\x5e\xcf\x05\x03\x54\xa6\xd7\xb0\x3c\xbc\x9a\x9d\xcf\xae\x36\xda\xe6\x9a\xcf\x5a\xce\x97\x98\xaa\xf4\x05\x9c\x2f\x7d\xe2\x0f\x96\xff\xa4\x5a\x30\xfd\xa9\xdf\xb0\x31\x41\x45\x3d\xf4\xad\x09\x56\xbc\xe6\x42\x0d\x9c\xb3\xc4\x0e\xf9\x2b\x5c\x9d\xd5\x6a\x75\x41\x65\x
c6\x6f\xc9\xa8\x2b\x9f\xab\xc2\x6a\xdd\x27\x88\x3a\x12\x30\x90\xd0\xf9\x9f\xce\xae\x37\x3a\xe4\x3d\x58\x8b\x08\xe7\x3c\x23\x72\xe4\xfd\xb0\x7b\xc6\x16\xce\xa3\xcd\xf7\xc5\x19\xf6\x33\x77\x86\x01\x77\xf8\xa5\x39\xc0\x28\xa3\x8a\x62\xc5\x13\x92\xc5\xdb\x36\x96\x5a\x2a\x5e\xda\xa3\x73\xe5\x80\x40\x54\x06\x28\x14\x2d\xb8\xb1\x6b\x60\xab\xc5\x04\x05\x3d\x23\x93\xd4\x57\x43\xde\x88\xa5\x3e\x46\x8c\xdc\xc5\x40\xc2\xbc\xa8\x87\xf0\x7b\x1b\x29\x5f\x01\xb7\xfc\xc3\xe9\xef\xed\x21\xd2\x77\xf9\x3f\x38\x9b\x5f\x6c\x1f\x5c\xc5\x36\x6b\x52\xb2\xbd\xb4\xf6\x61\x4a\x82\x29\xff\x34\x86\x87\x59\x43\xaa\x59\xe7\x7f\xd6\xb8\x30\xd8\x7b\xb7\x2f\xeb\x5f\x7b\x07\x46\x4c\xcc\x51\x84\xc3\xfc\x3b\x67\x2d\xd1\xca\x3c\xe8\x08\xe6\x09\x25\x30\x93\x7a\x3b\x62\xa7\x36\x54\xa4\x0e\xad\x63\xf4\x10\x1d\xa9\xac\x4a\xaa\x41\xb9\xe7\x1c\x1a\x33\x79\x8b\xfd\x1f\x7c\xee\x4c\xca\x4c\xf6\xea\x27\x04\x3a\x1e\x63\xd4\x6a\x4d\xdc\x28\x0d\xe8\x07\x2a\x95\x89\x81\x36\xf0\xa0\x41\x2d\x4d\xa8\xc8\xa7\x35\xbd\x6b\xa8\xb4\x5e\xfd\x15\xe7\xb9\x38\x35\x72\xd4\x55\x17\x13\x70\x9f\xe6\xb6\x38\x3b\x66\x89\x85\x96\x8f\xd4\xba\xa2\x19\x5c\x9e\x3f\x9e\x5f\x03\x1c\x89\x7e\xf7\x5b\x53\xae\xe8\x5f\x7f\xf3\xdb\xaf\x93\x36\xfc\x69\x53\x5a\x76\xb0\x1b\x3d\xb1\x07\x72\x2f\xca\xcb\x2e\x51\xc9\xa0\x48\xcd\x9a\x5e\xc8\xf6\xdc\x1b\x0a\xd4\x5b\xea\x65\x44\x8c\xda\xc6\x2a\x63\x2f\x91\xb4\x5b\xe3\x39\x47\xd2\x22\x9f\x0c\x67\xf8\xd2\xc3\xb8\x9a\x61\x68\xd7\xbf\x1c\x86\x36\x88\xbb\x61\xea\x6c\x53\xa5\xe1\x14\xfa\x5e\x1d\x78\x5d\x21\x2b\xe7\xe2\xdd\xec\xaf\x3f\x9c\x7d\x7b\xf9\x83\x6d\x1e\x46\x7f\x1a\x2c\x8f\x12\xd6\x88\xdd\x21\xbe\x31\x9d\xb0\x63\x37\xf0\x61\xd4\xa4\x7b\xe6\xde\xbd\x99\x6d\x18\x32\xf4\x27\xa1\x3b\xae\x71\xba\x0d\x71\xc9\x1e\x77\xdc\x43\x6f\x1e\x6c\x11\xc5\xc4\x63\x1b\x27\x27\x10\x1f\xfe\xe0\xf4\xb8\x9d\xac\x97\xce\x40\x4f\xee\xdb\xc9\x60\x7a\x8f\xcc\xac\x1e\xf9\xe6\x37\x88\x7f\xf4\x05\x9d\x4c\x1a\x03\x06\x2b\xcf\xca\xbd\xf4\x08\xb8\x4d\x51\x51\xc4\xb8\xcc\xcb\xc3\x19\xbc\xe1\xdc\x98\xfa\x80\x9b\x28\x3a\xa1\x65\x81\x96\x02\x44\xca\x78\xd9\xaf\x0d\x4a\x7c\x0e\xb4\xd6\x44\x8c\x19\x96\x7f\x5e\x60\x3a\x50\xdf\x68\xe3\xc8\x75\xbd\x6c\xfe\x3b\x33\x06\xdd\xa4\xde\x44\x1b\x6d\x48\x30\xea\x84\xeb\xe3\x5d\xb0\xbd\x8c\x43\x29\xe1\xa1\xe3\xbc\x23\x7a\x1b\x94\x4c\x1c\xa6\x9b\x8f\xcc\x27\x99\x9e\xd0\x53\x30\x5a\xf8\xa1\x3d\x84\xd7\x7b\x38\x63\x88\xfe\xdc\xbd\xb4\x99\x72\xdc\xb3\x43\xf1\x0b\xc8\xf6\xfe\x21\x2c\x7d\x39\xee\xda\x9a\xad\x12\x73\x93\xbf\xe4\x06\xa3\x47\xe5\xe4\x3f\xee\xb7\xc1\xfd\x85\xcb\x5a\xde\xcf\x25\xaf\x5a\x71\xc5\xd9\x4e\xe9\x38\xd7\x1d\xaf\xb6\xdd\xf5\xe6\x89\x73\x93\x68\x56\x0c\x2a\xe3\xcd\x9e\x99\xf8\x63\xef\x1a\x82\x46\xad\x56\x7e\x71\xe6\x9c\x44\x6d\x17\xd1\x13\xe8\x48\xf9\xd5\xc5\x03\x4f\xed\x3f\x43\x5a\xfd\x5e\x0d\xf7\xa9\x21\x4c\xf9\xa8\xec\xb8\xab\x0b\xab\x57\xba\x0c\x38\x69\xc9\x14\x35\x74\x1a\x50\x63\x64\xfd\xd1\xdc\x98\x14\xa9\xcd\x85\xba\xe3\x62\x5c\x61\x8e\xeb\xd6\x4b\x1b\x51\x32\xf6\xbb\x11\xf9\xa5\xcf\xe3\x94\x99\xf9\x7e\x81\x93\x36\x03\x77\x7e\x80\xc4\x2e\xef\x58\x54\x31\x36\xf7\xc6\xae\xc3\x39\x70\xfc\x22\x30\x07\x0f\xa7\x39\x7e\x9d\xc7\x2e\xe9\x5e\xf0\x78\x87\x72\x07\x39\x19\x29\xe1\xe0\x05\x67\x64\x65\x3b\x95\x70\xd8\xcb\x45\xc4\x51\xef\xe8\x5e\x56\x57\x17\xd6\xf4\xa2\xf7\xaf\x61\x4b\xd8\x1f\xf2\x94\x50\xc2\x14\x46\xe3\x9a\xc8\x27\xf2\x18\xd3\x66\x66\xc1\x05\x54\x44\xa0\xa6\x48\x42\x53\xfd\xdb\x96\x48\x38\xb6\xcd\xc6\x4a\x5c\x0d\xd6\x86\xd6\x2c\x26\xec\x52\xf3\xf8\xec\xc4\x4e\xf4\x81\xdc\x64\xb7\x2e\x50\x6f\x63\xad\x9f\xc2\xe6\x4e\x31\xcd\x38\xa5\xf5\xd3\x56\x2b\xa7\x84\xeb\x6b\x67\xa3\xa7\xa1\x56\x4e\x11\x98\x69\xad\x1c\x36\
xdb\x34\x44\xb9\x6a\xac\x89\xc3\x76\x83\x86\xd8\x3c\x5d\xfb\x86\x27\xf6\xde\x59\x82\x1c\x41\x46\xae\xbd\x93\x8b\xb0\x36\x27\x58\xa3\xf2\xe9\x3c\x62\x6d\xbd\xc3\xcf\xc0\xec\xa5\x2d\xe5\xe2\x59\x0b\xc2\x05\x4f\xa8\xd4\x1d\x90\x46\xd0\x15\xcf\x06\xb8\xac\xab\x78\xcf\xbe\xf4\x1a\xfd\x63\xca\xe8\x67\xae\x9b\xf4\xe8\x1e\x0b\x61\x81\x1c\xdf\xe3\xae\x81\x07\xa5\x13\x12\x60\x82\x64\xb6\xc8\x4c\x78\x7c\x64\xab\x82\xb1\x1d\x05\x92\xc8\xc3\x8d\xb6\xe8\x68\xa9\x14\xc7\x88\xe0\x6c\x85\x6e\xc8\x7a\x02\x1c\x2c\x11\x22\x42\x15\x86\xbc\x3d\xc0\xe5\x05\x56\x1b\xed\xac\xbc\xa1\x29\x6f\xda\x80\x27\x83\x76\xf9\x5e\x0d\xe5\xfa\xec\x3c\xa7\x36\x4b\xab\xc3\xa5\xc3\x5c\x71\x69\x0d\x2b\xd6\xc8\x72\x43\xd6\xc0\xac\x32\xce\xe0\x6e\x6b\x3f\x06\x2c\xa4\xb6\x1f\x82\x14\xb7\x8d\x26\x3c\x9a\x2f\x90\x1c\x9a\x9d\x6f\xaf\x24\x19\xae\x5f\x71\x13\x86\xa3\x45\x82\x3c\xb6\xee\xa0\xe0\x57\xc6\xe1\xd5\x36\x7c\xb1\xea\x34\x04\x06\x61\xc0\x45\x18\xf2\x33\x02\xad\x34\x83\x18\x64\xd3\x33\x1a\x60\x3a\xaa\xf0\x1b\x7f\x9c\x10\xcb\xdc\x0c\xbb\xc5\x92\xa8\xba\x32\x53\x86\x48\x2d\xbd\x66\x22\x25\x32\xb5\xbb\x4b\x2c\x6e\x46\x4c\xd3\xb5\xbb\x98\x42\x4f\x1d\xe9\x6b\x44\xfa\xe6\x32\x70\xc9\xc2\xeb\xe4\xc6\x4b\xa8\x69\xea\x61\x7a\xe0\x4c\xa7\xa6\x03\x0e\xe2\x62\x2b\x11\x32\x11\x62\xaa\x64\x68\xc6\x28\x26\xb0\xa9\x05\xe1\x0a\x22\xc7\x41\x2b\x05\x02\x00\x93\x30\xac\x21\x49\x4b\x09\x07\x4e\x34\x24\x36\x63\x24\x7b\x34\x63\x4c\x3f\x17\x33\x26\x7a\x65\x23\x9e\x4e\x6e\xae\x84\x76\xe0\xd9\x66\xdc\x90\xc8\xd5\x6b\x73\xb4\x83\xcc\x2c\xe7\x6a\x64\x51\x3a\xce\xd1\x2e\xbd\x5b\xcc\x48\x6d\x9d\xd6\x33\xeb\x81\x26\x6a\xa3\x80\x22\xd7\x72\x6d\xb8\x9d\xda\x48\x98\xdb\xcd\xd7\xba\x1b\xab\x8d\x04\xdb\xdb\x86\x2d\x5d\x6b\xdf\x1c\x7b\x6c\xc8\xd6\x8c\x84\xd6\x6c\x63\x21\x46\xee\x00\xb0\xff\x23\x61\xa6\xb6\x74\x6b\xc6\x2e\x5d\x6d\xcc\x18\xd7\xe6\xad\x19\xa9\x0d\xdf\x9a\xb1\x75\xb6\xdb\xed\xce\x78\x4a\x1a\xd0\xd6\xec\x6d\xaf\xb7\x12\x57\x5e\xd1\x51\x7c\x8a\xde\x1a\xf9\x66\xfa\x8d\x8c\xdd\x51\xdb\x1f\xce\xa5\x07\x59\x50\xa1\x08\x1c\xc5\x3a\xcd\x20\x05\x29\xb5\xc2\x60\xca\x05\x38\xa0\x81\xf8\x1c\xbf\x78\x2b\xcf\xc6\xc8\x5d\x33\x46\x33\xc6\xb4\x20\x34\x33\xc6\x85\xa2\x25\xcf\xfb\xc9\x43\xd6\xcc\x48\x0f\x5c\x4b\x06\x09\x01\x6e\x89\xe1\x6b\x66\x8c\xde\x2f\xa7\x03\x3e\xa0\x0b\xd9\xda\xe7\xa9\xb4\xf4\xdb\x78\x69\x8c\x66\x68\xc1\x07\x57\x03\x27\xba\x6c\xd3\x93\x51\xab\x8e\x1b\x3c\xed\x12\x52\xfa\xc4\x07\x8f\xc7\x6f\xcd\x01\xc4\x7f\xba\x7b\xf3\x76\x67\xef\xad\x56\xda\xc9\x64\x30\xe2\xee\xfa\xb4\xd7\x82\x58\x77\xf0\x11\x3c\x75\xa3\x30\x71\x7f\x9f\xf0\x11\x30\xc7\x74\x14\x0f\xde\x7a\xaa\xcb\xc6\x13\x5c\x1f\xc6\xf4\x20\x6f\x46\x62\x37\xf2\xb1\xca\x78\xd3\xbb\xbc\xbf\x2f\xf9\x48\x98\xdd\x5d\xcc\x47\xf6\x09\x6d\xc6\x4e\x5b\x8f\x76\xda\x7e\x34\xba\x7f\x79\x33\x76\xa3\x05\xb4\x53\x4f\xf3\x66\xc4\xbb\x9b\x8f\x06\x69\x8e\xa8\xef\x87\xde\xd5\xe7\x7c\x07\x98\x0f\xe9\x8c\x1e\xcc\x6c\xb7\xbb\x2f\xda\xa5\x5b\x7a\x33\xa2\x7d\xd3\x77\xc0\x87\xef\xb4\x8e\x1e\xdc\x41\xbd\x19\x2f\xa6\x81\xed\xf1\x62\x1a\x78\x31\x0d\xb4\xc6\x17\x32\x0d\xc4\xba\xc2\x8f\xc4\x18\x18\x17\x22\xfd\xe1\x47\x82\xec\xea\x26\xbf\xd1\x29\x7e\x24\xc4\x7d\xf5\x95\x6f\x46\x42\x87\xf9\x91\x10\xdb\xfd\xe8\xc7\xf6\x9a\x6f\xc6\x8e\xbc\x77\x97\xfe\xf3\xcd\x48\xec\x44\x3f\x12\x25\x4d\xdf\xfa\xfe\x9e\xf4\x23\x41\x76\x76\xb0\xdf\xe8\x4e\xbf\x23\xc8\xde\x5e\xf6\x5a\xeb\x1c\xbb\xf2\x1d\xbb\xda\x37\xe3\xc9\xb5\xd5\x11\x3d\xef\x9b\xb1\xbb\xb2\xba\x53\x1f\xfc\x66\xa4\x75\xc4\xdf\x41\x9f\x0a\x7b\xe8\x3f\xac\x37\x7e\x33\x1e\xa0\x6d\x8e\xec\x97\x1f\xbc\x18\xeb\x9c\xbf\x1b\x6e\x4c\xaf\x7d\xd4\xdd\x43\x7f\x07\x90\xbb\x75\xdd\x6f\xc6\x33\xed\xe0\xdb
\x35\x46\x75\xe2\x6f\xc6\x6e\x3d\xf9\xb7\xdf\xdf\x89\xfc\xf6\xd4\xa7\xbf\x19\x63\x3b\xf6\x37\x23\xa1\x77\xff\xce\xd7\xa7\x47\x3e\xd2\x26\x06\x73\x4f\x76\x57\xdb\x42\x0b\xfa\xe7\x24\x2e\xf9\x97\x60\x72\x7d\xe2\x50\xa5\x59\x4a\xa7\xb3\x66\xbc\xc4\x29\xfd\x22\xe2\x94\x66\xb6\xb7\xdc\x0e\xb7\xf9\x94\x20\xa5\x26\xf8\x28\x19\x6c\x3c\x48\xc9\x69\x52\xe9\x26\xf0\x97\x20\xa5\x97\x20\xa5\xd4\x77\x5e\x82\x94\x5e\x2c\x91\x2f\x96\xc8\x17\x4b\xa4\x1d\x2f\x41\x4a\x2f\x41\x4a\x2f\x41\x4a\x3f\xcf\x20\x25\x5b\xec\x8d\x0b\x34\x86\x9b\xe9\xd3\xf8\x14\x01\x4a\xb6\xc1\xed\x59\x96\xf1\x9a\xa9\x8f\xfc\x86\x24\x79\xb1\x93\x2e\xcc\x5b\x90\x93\x56\x60\x6f\xd8\x8f\x77\x7b\x1e\xa3\x1e\x26\xab\x7a\x63\x95\x3c\x5c\xe7\x54\x5f\x79\x77\x22\xb3\x33\xfb\xb2\xbb\x89\x6a\xe1\xc5\x72\x92\x7b\xa8\xc9\x64\x66\xd9\x8b\xd2\xbb\x33\x45\x67\x48\x90\x8c\x56\x54\x33\x5b\x28\x7e\x02\x9f\x03\x1d\xa6\x5f\x47\x5d\xb7\x3c\xaa\x24\x29\x16\xb6\xf7\x18\x0b\x9a\xea\xee\x70\x79\xb4\xb2\xc3\x2d\xaf\x35\x6d\x73\xcf\xe5\xae\x09\x55\xfa\xcd\xd1\xf4\x30\x16\xe4\x6f\x4e\xad\xb1\x58\xf8\x18\xfe\x92\x33\x94\xa6\xdf\x58\xcc\x65\x3c\x58\xae\x9d\x2c\xae\x68\x5a\x5d\x2e\x0f\x69\x2c\xeb\x22\xf7\x15\x15\x70\x0c\x67\x24\xe3\x2c\xdf\xcd\xe4\x73\xb9\x09\xc5\x51\x99\xf5\xcf\x8c\xd8\xb6\xbc\x36\x80\x20\x49\x13\x17\x34\xa7\x6a\xed\xa3\x5b\x6c\x67\x6d\x6c\xf8\xc3\x08\xfc\x1a\x52\x95\xcd\x96\x21\x5c\x55\x82\xe3\x6c\x45\x64\x80\x83\x74\xa9\x02\xda\x8f\x29\xea\xe0\x73\x49\x4d\x67\x7e\xd0\xc7\x01\xb6\x56\xb6\xa2\xe9\xf5\xcd\x10\x5c\xb9\x50\xbb\x8d\x85\x86\x54\x66\x7f\x74\x9c\x59\xc6\xe8\x3d\x4a\xac\xa1\x84\x10\x0f\x7f\xca\x60\x83\x8e\xd3\x06\xed\x4b\x12\xf1\x22\x77\x95\x1c\x7f\xf7\x35\xaa\x88\xc8\x2c\x1f\x80\x5b\x18\x1d\x71\xb1\x51\x1c\x15\x5a\x39\xd5\x32\x6f\x11\xce\xac\xf5\x23\xbf\xf9\x06\xad\x78\x2d\xe4\xf4\x62\x87\x33\xf6\x1a\xde\x35\x06\x19\x77\xbb\x53\xa8\x20\x58\x2a\xf4\xfa\x6b\x54\x52\x56\x6b\x3d\x67\xd4\x51\x1b\x77\x07\x08\xb4\xff\xdf\x7e\x93\x66\x0f\x1e\xa1\xf7\x6f\xc7\x2c\xd9\x53\x58\x99\xbe\x3f\x56\xfd\x37\xdc\x26\x19\x71\xa6\x08\x87\xe9\xd8\xb5\x11\x77\x60\xc5\x6d\xb3\x5b\xa3\x0c\x90\x7b\xe4\x69\x3f\xd5\x7c\xbe\x56\xa9\xa5\x5e\xfe\xd3\x3c\xdd\xae\xf1\xe2\x3e\xdc\xaa\x55\x39\x30\xcb\xed\xd6\x71\x4d\xa9\xca\xde\xd7\xf6\xd8\x7f\x72\x49\xa5\x1a\xe8\xe4\x33\x89\x19\x17\xd2\x14\x90\xa5\xbe\xa7\x8f\x48\xb1\x87\x7b\xbd\xbb\x25\x3a\x93\x7a\x96\x11\x09\x76\x99\x0b\xdf\x91\x39\xb2\xfd\x8c\x9b\x5f\x1e\x7c\xec\x0b\x15\x97\x76\xc4\xf2\x85\x3b\x92\x25\xa0\x26\xae\xd3\x3b\x32\x1a\x85\x1b\xf3\x4a\xfb\x04\x49\xca\x96\xa6\x81\x58\x59\x17\x8a\x56\x51\xff\x89\xc3\xa2\x07\x67\xc5\x5e\xd8\xc5\x00\x07\x96\x64\x6c\xca\x27\x9d\x26\x94\xe9\x04\xe7\xd1\x91\x9b\x07\x22\x4c\x99\x3e\x55\x42\xcb\xd6\x0a\x0b\xec\xb7\x22\xe3\x65\x89\xe5\x2b\xe3\x6b\x88\x40\xc5\x10\x69\x63\x78\xaa\x16\x74\x02\x17\x1e\x7d\x61\x34\xc2\x3e\x48\x56\x11\x86\x59\xc4\x11\xd9\x36\xf1\xc0\x0b\x88\xdf\x31\xd7\xde\xc2\xf4\x31\x6d\xd3\x2a\x4a\xaa\x63\xf2\x2d\xce\x6e\x08\xcb\xa1\xd9\xb5\x41\x53\xbe\x66\xb8\xb4\x25\x98\x7d\x2b\x61\x92\x3b\xe8\x11\x98\x3e\x4a\xc3\xd8\x52\x4d\x2d\x14\x57\xb4\xc9\xe8\x4d\xfb\xc0\x59\x2d\x47\xd5\x2d\xfd\x24\xb5\x5a\x31\xcc\xa7\x12\xce\xa2\x24\x82\xde\x66\xc4\xe9\x6a\x7a\x12\xfb\x58\xcc\x6d\xa4\x7a\xd9\xd6\x72\x6c\xe5\x32\x1a\x9c\x18\x10\x50\xde\x55\x0a\xf5\x98\x71\xa1\x99\x4f\x4c\x2b\x75\x95\x75\x36\x88\x67\xbe\xde\x4f\x5b\x5c\x31\x4f\x2d\x99\x74\xf8\xe1\xdb\x8b\x36\x9b\xf9\x80\x73\x2e\xd1\xb7\x05\xcf\x6e\xd0\x05\x01\x35\xd9\xc9\xec\x41\x51\x4d\xf6\xdd\xee\x55\xcc\xf3\xa7\x6d\x71\x55\xe2\x65\xff\x41\x9b\xa0\x92\x33\xaa\xf8\x40\xab\xc2\x47\x2a\xf6\xf6\xd2\x5e\xe9\x79\xb5\x57\x12\xf3\x18\x32\x7
e\x7e\xcd\x95\x34\xe9\x8f\xea\x26\xb8\x22\x48\x00\xa3\x80\x57\x5d\x9d\xfe\x1d\x0f\xf7\x57\x2b\x7e\x37\x51\x7c\x52\x4b\x32\xa1\x91\x08\xa4\xc4\x15\xdd\x90\x35\x04\x66\x8d\x58\xd3\xf7\xe6\x95\xd6\xa5\x4e\x71\xb0\xfa\xc2\xe7\x5a\x01\xf9\xf0\xed\x85\x96\x6a\xa9\x35\x6f\xa9\x44\x27\x44\x65\x27\x19\xa9\x56\x27\x76\x4a\xcf\x0a\x4d\x8e\xa7\x8d\xc1\xd3\x19\xca\x78\x51\xd8\x9a\x5a\x7c\x81\xce\x49\xb5\xf2\x80\x46\x38\x43\x1e\x79\xed\x4f\xdb\x5e\xa7\xe2\x7c\x4c\x3b\x90\xe0\x00\xe9\x37\xed\xf9\x09\xc8\x46\xcc\xf3\x3d\x76\xa6\x7e\x6c\x32\xda\x7b\xbf\x80\xfd\x77\x17\x7a\xc2\x53\xf7\x38\x5d\x8a\x0e\x67\xee\x25\x88\xd8\x0a\x1c\x91\x23\xda\x0e\xb9\x9f\x6e\x31\x33\x74\xb5\x30\x77\x8e\x9c\xe4\x88\xdf\x12\x21\x68\x4e\x24\xf2\xfc\xea\x22\xa9\x87\x8c\x9e\x14\x2d\x9e\x12\xcf\x2f\xdd\x93\xb6\xc6\x73\xef\x9e\x34\xf2\x06\x19\xb0\x49\xfd\xe6\x36\x9b\xc4\x79\x49\xd9\xcf\x84\x51\xca\x0c\x17\xe4\xea\x7d\xe2\xc5\x6c\x66\x9e\x6e\xdf\xcd\xdc\x87\x41\xf1\xf9\xa8\x89\xac\xaf\x30\xfd\xf7\x9e\x26\x11\xe3\xf9\x90\x5d\x7e\x6f\x37\xac\x25\x56\xe4\x6e\x40\x28\x4f\x1a\xc6\x38\xf4\xcc\x70\xf9\xf8\x97\xe2\xf6\xee\xda\x14\x9c\x13\x53\x77\x7b\x1f\xa2\xde\xee\xe1\x18\xc3\x99\x5b\x5a\xd3\xa9\xc6\x04\x61\x58\x62\x3e\xbb\xbe\x42\xdf\x19\xa8\xfb\xa9\xaf\x2f\xb8\x32\xda\xe9\x05\x2f\x31\x1d\xd5\xac\x34\x68\x33\x12\x4e\xf1\xda\x83\x44\x06\x66\xcc\x7c\xdc\x74\x5e\x5d\xd0\x65\xad\xef\xab\xf6\x16\xf9\x52\xac\xbc\x73\xc9\x8f\xa3\x32\x35\x1a\x53\x60\xa3\x73\x91\xf9\x8d\x1e\x64\x77\x39\xb2\x74\x10\x40\x3e\xac\x01\x49\xc2\x24\x05\x7f\x5f\x10\x6a\x03\xaa\x14\x04\xb9\x9a\xa0\xfa\x98\x15\xdd\x2a\x5d\xc7\xe8\x07\xbe\xa4\xcc\x9d\x7d\x6e\x9d\xdc\x0b\x4c\x23\xe1\xf1\x2f\x7a\x50\xcf\x78\xee\x7a\x90\x94\xc5\x25\xc3\xf3\x22\x16\xf1\xd4\x16\x41\x05\x86\xe8\x02\x02\x6f\x9e\xe4\x54\xea\x7f\xd1\x6c\xf6\x03\x78\x59\x6a\x96\x76\x03\x00\x7f\x83\x65\xb8\xbe\x80\x88\x61\x0a\xfb\x39\xa9\x86\xd3\x8d\xac\x3f\x7f\xc5\x72\x3d\x7d\x22\x5b\x81\x7a\x16\x96\xa9\xe9\x9f\xe6\x0e\xb4\x31\x44\x73\x82\x3e\xae\x68\x76\x73\x1d\x38\x53\xb8\xd0\x9f\xb1\xe0\xa3\x11\x97\x9b\xcd\x37\xf7\xc1\xc8\xed\xf2\xae\xc7\x19\x0f\x3e\x06\x92\x69\x66\x11\xa4\x41\x20\x2c\x25\xcf\xa8\xf7\xbd\x25\x38\xa2\x1a\x59\x89\x72\x10\x6c\xfb\x59\x16\x68\x2e\x0f\x90\xbb\x6e\xdb\xad\xc2\x85\x65\x20\x49\xe3\xa6\x00\x8b\x9b\xbd\x2c\xc5\x90\xdc\xc8\xde\x65\x1f\x5b\xdd\xca\x9c\xeb\xcb\x38\x86\xbc\xeb\x27\xa9\xfa\x83\xdb\x66\xa7\x79\xda\x06\x93\x1b\x1b\x1d\x76\x2d\xb3\x35\xfb\x1f\xbc\xf8\x58\xa2\x66\x97\x6d\x62\xc3\x5f\x6d\x3e\xb3\x0e\x21\x38\x94\x15\xaf\xea\x02\x0f\xba\x33\xc3\x06\x6c\x0f\xb5\xd4\x9b\x19\x3c\xd0\x75\xf4\x98\x4d\x37\x06\x92\x6b\xb6\xfa\x6f\xc4\x1d\x30\xae\x3b\x47\x5a\xff\x8d\xb4\x00\x2f\xc5\xd1\xd7\xbf\xfd\xe6\x9b\xae\x7e\x1d\xfd\x1d\x39\x52\x7c\x45\xdd\xfd\x3a\xfa\xd3\x64\x22\x30\x87\xfa\x75\xf4\x75\xe4\x48\xe0\x8f\x83\x69\x30\x09\x89\x2d\xfb\x6f\xc9\x91\x60\xb4\x1e\x91\x2d\x1c\x66\x01\xc7\x7c\x1a\xf1\x1c\xe1\xd1\x19\xb3\xbd\x99\xc1\xfd\x19\xbf\x51\x37\x59\x47\x3e\xf0\x70\x9e\x6f\x54\x5d\xea\xcd\x67\x1e\xcc\xee\x8d\x80\x6d\x32\x8c\x93\x72\x7a\x93\xe2\x67\x52\x33\x79\x83\x0c\xdd\x08\xc4\x84\xfc\xdd\x9e\xbc\xdc\x08\xe0\x8e\xac\xdd\x07\x66\xe3\x3e\x5e\x0b\x99\x78\xbe\x6d\x72\x16\xed\x88\xb4\x88\xd4\x94\x88\x94\x3c\xd9\xa4\x94\x89\x31\xe9\x12\x89\x99\xb0\x7b\xc8\x7f\x1d\x15\x71\x9f\x9a\xeb\x3a\x46\x08\x6b\x51\x95\x00\x12\x0d\xe7\xb5\x76\x64\xaa\x26\x01\xed\xcc\x66\xed\xcf\x4f\x4d\x82\x99\x22\x6e\xc7\x67\xa5\x26\x0a\xdd\x40\xac\x26\x81\xdd\x59\xf4\x9a\x31\x36\x8a\x7c\x5c\xfe\x68\x6a\xf4\x78\x52\xae\x68\x98\xfd\x99\x84\x9a\xde\x0c\xd1\x9e\x9c\xcf\x34\x7c\x6f\xe4\x85\x46\x33\x3d\x93\xa7\x3a\x32\xbf\x33\xf1\xe0\x
a7\xe5\x06\x8e\xcb\x08\x6c\x6a\x91\xc7\x34\xd9\xb4\x44\xc0\x71\x36\xce\xb1\x8d\xb2\x43\xeb\x9b\xbd\x73\xb9\xfd\x33\xd1\x77\xbe\x66\x70\x5c\xd7\xaf\xf7\xd0\x28\x3f\x76\xeb\x42\x89\x17\x50\x03\x96\x0f\x48\xa3\xf6\xae\x9a\xe7\xdf\xcf\x36\x5c\x66\xfe\xe3\x9d\x9b\xcb\xee\xd9\x53\xf6\xe2\xa0\xfa\x92\xdd\x97\x5f\x7c\x27\x69\x8b\x6e\x7c\x27\xb2\x55\x31\xcf\x59\x75\x8c\x7e\xb4\x88\x36\x79\xe0\x73\x5b\xcf\xde\xb0\x59\x7f\x1a\xcf\xae\xaf\x50\x26\x08\xa4\x64\xe2\x42\x4e\xd1\x76\xfd\x8e\xa8\x8e\x6e\x2c\xd7\xa0\x67\x35\xd5\x9a\xb0\x52\xa4\xac\xa2\xb5\x5a\x5f\x5c\x27\x3d\xe3\xb9\xbb\x4e\x76\xb2\xc8\x7e\xf6\x2f\x39\xcb\xc0\xaa\x2e\x31\x9b\xe8\x73\x0c\x4e\x14\x6b\xae\x4d\x30\x09\x6d\x0a\x94\x29\x72\x99\x01\x80\x61\x30\x86\x41\xea\x91\x69\x91\x9c\xe0\x8c\x81\xfb\xa3\x17\xd2\x7b\x36\x5b\x03\xcc\x9d\x30\x65\xda\x0c\x6c\x9c\xfe\x8c\x57\x89\x15\x70\xad\xb8\xb5\x0b\xf4\x58\x73\x07\x3d\xe8\x64\x30\xa2\x76\x99\x5a\x11\xa3\xda\x5c\x43\x32\x60\x03\xa3\x5d\xa5\xc7\x5c\x71\x70\x51\xf0\xbb\xa8\x81\x73\x45\x5a\x02\x5e\xd3\x81\x5e\xa5\xcd\xe4\x9d\x13\x54\x52\x21\xb8\xb0\xa6\xf6\x70\x29\x11\xc8\x10\x04\xa0\x6f\x59\x44\x98\x0b\x88\xb0\x3e\xe2\x19\x51\x21\x41\x2a\x8e\x30\x33\x89\x1f\x71\x05\xcd\x85\xe7\x99\xbe\x33\x96\x03\xce\xc9\x0a\xdf\x52\x5e\x0b\x03\x5b\x71\x74\x60\xbf\x8a\x55\x11\xa5\x0b\x48\x1d\x70\x06\xdc\x1a\xa2\xcb\x3d\x56\x65\xc7\xee\x79\xca\x88\x23\x16\x2b\x94\x73\x67\x91\x9a\x90\x7b\x2a\xd5\x16\x40\xbf\x71\x49\x05\xb6\x13\xa8\xff\x56\x56\x5a\xd0\x7e\x8e\x64\xf7\xb4\x69\x3e\x7c\xa7\xad\x3e\xde\xce\xe0\xab\xdd\x95\x47\x9b\x4f\x6e\x52\xeb\x5c\xd0\xfe\xc3\x14\xc8\x94\x50\x2b\x33\xdf\xc1\x8e\x1b\x2f\x7a\xe8\x97\xd4\x43\xbd\xeb\xb7\xa0\xd9\x7a\x54\x0f\xfe\xc6\xe5\xab\x5f\x45\xdf\x62\x49\x72\xf4\x16\x33\xbc\x34\x17\xf5\xa3\xd9\xf5\xb7\x6f\x5f\xe9\xed\x4d\xa8\xb1\x79\x75\xd1\xe1\x4c\xf4\x42\xce\xfc\xc4\xbb\x7d\xa4\xa4\x6d\xad\x79\xa4\x04\xdf\xeb\xaa\xf7\x92\x64\x87\xbc\x90\x8b\xb7\x40\xd9\x2e\x1d\x60\x5c\xb9\xae\x22\x89\xdc\x64\x34\xb7\x65\x7e\xb3\xdb\x04\x29\x93\x0a\x17\xc5\x75\x81\xd9\x59\x55\x09\x7e\xbb\x6d\x1b\x69\x17\xae\xb1\x0f\x39\xc5\xc8\xc4\x39\xb9\x0f\x2b\x83\x6e\x88\xbf\x60\xe8\xaa\x81\xbd\x89\xbe\xde\xf9\x74\x69\xc5\xbd\x0f\x77\x57\x44\x8e\x3c\xde\xab\xe6\xf4\xbf\xa7\xb0\xd0\xac\xe3\x7c\xf6\x39\xe9\x1d\xa9\xb0\xaa\x5b\x6c\xb2\x87\x51\x77\xb1\xe6\x09\x2a\xb0\x54\x9f\xaa\x1c\xb7\xbd\x42\x7d\xfc\x37\xc3\x0a\x17\x7c\xf9\x27\x82\x8b\x6d\xb2\x6a\x6d\xdd\x79\xf8\xa4\xb3\xdc\x99\x5d\x9c\xd5\x73\xff\xe0\xa1\x44\xfa\x76\xe0\xca\x86\x08\x52\x90\xdb\xae\x5c\x41\x0b\x6e\x66\x4a\x98\x1f\xda\x55\x4f\xd1\x15\xf8\x7d\xb4\x66\xa5\xb9\x62\x4e\x14\x11\x25\x65\xed\xdf\x98\xc1\xb3\xe7\x9c\xe5\xb4\xbb\xdb\x3f\x58\x3c\x0d\x84\xf6\xef\x74\x13\x52\x97\x1f\xa7\xc7\x73\xd3\x66\x13\xc1\x9c\xda\xe8\x31\x8f\xcd\xad\x22\xbb\x32\x1f\x42\x80\x46\x6b\x3e\x1d\x87\x6e\x13\x9f\xe8\x86\xf1\x3b\x69\xca\x6a\x6d\x73\x91\x41\x11\xde\x2f\xba\x27\x6e\xdf\xcd\x34\xba\x42\xa0\x27\x76\xd6\xdb\x86\x86\x3e\x12\x33\x63\x58\xd0\x6f\xfe\x6c\x37\x23\xeb\xa2\x3b\xff\x8a\xc9\x89\xf6\xee\x60\xe3\x1e\x6b\x3d\xd4\xcd\x6c\xa3\xda\x4e\x5c\x45\x49\x69\xf4\xd5\x66\x77\x4d\x33\xa8\xad\x7b\x77\x44\x2d\x1b\x94\x06\x09\xbd\xb0\xda\x37\xff\x2b\x1b\x95\x2a\xec\x4d\x03\xa3\x8a\x12\x53\x47\x0b\x33\x8b\x14\x60\xe6\x04\x0f\xa9\x9b\xe6\x71\x2d\x46\xf4\xbd\x13\xde\x3a\xb6\xe1\x15\xc6\xde\x6e\x43\x6f\x9c\x5d\x1f\x9b\xaa\x4f\x03\x10\xff\x63\xf6\xfe\xdd\xc9\x77\xdc\xfa\xfa\x6d\xc6\xbe\xe6\x05\x20\x68\x8f\x91\xac\xb3\x15\xc2\x52\x2f\x47\x93\xb2\x3e\xf9\x64\x5a\x62\x46\x17\x44\xaa\xa9\x6f\x66\x21\xff\xfc\x9b\xbf\x0c\xc9\xd8\x37\x5c\x20\x9b\x86\x71\xec\x4a\x12\xd9\x55\x37\x94\x44\xa5\x41\
x8d\x87\x1a\x2f\x70\x0c\x49\xe7\x16\x05\x77\xb0\x74\x85\x6f\xc0\x93\xe7\x5c\x72\x05\xbd\x21\xa7\xe8\x40\xeb\x73\xc1\x74\xff\x5b\x0b\xaa\xff\x19\xba\x30\x1d\xdd\x81\x84\x3e\xd0\x0f\x1e\x98\x49\xfa\x70\xe2\x30\x7a\xad\x99\xac\xc9\xac\x16\x74\xb9\x24\xc3\x91\x6b\xd0\x44\xe2\x96\x30\xf5\xca\x16\x68\x62\x3c\x00\xe3\xc2\x20\x9a\x68\x80\xcd\xc9\xff\xf9\x37\x7f\x19\x9c\x79\x1b\x7f\x88\xb2\x9c\xdc\xa3\xdf\x18\x3f\x01\x95\x1a\x63\xaf\x6c\x18\x8e\x5c\x33\x85\xef\xf5\xaf\x65\x2b\x2e\x07\x4b\x0e\x81\x95\x43\x71\xb4\xc2\xb7\x04\x49\xae\x2f\xfb\xa4\x28\x26\xd6\x23\x82\xee\x30\x14\x16\x73\x9b\x09\x15\x46\x50\x85\x07\x6b\x8b\x84\xc4\x3f\xb5\xa6\x28\x98\xa1\x26\xb6\x25\x73\x11\x0e\x0b\xca\x70\x61\xbd\x89\x50\x51\x78\xb8\x4b\x23\xe4\xb1\x1b\xd2\x52\x1c\x65\x2b\xcc\x96\xc4\x67\x61\xd7\xaa\x16\x64\xc0\x97\x98\x70\xf6\x6f\x28\x4b\xae\xb7\xf0\x3d\x65\x9b\x01\x34\xdd\x06\xbf\x25\x55\x2e\x3b\xc9\x46\xf7\xaa\xf5\x89\xde\x47\x41\xe7\xb5\xe2\x42\x9e\xe4\xe4\x96\x14\x27\x92\x2e\x27\x58\x64\x2b\xaa\x48\xa6\x97\x72\x82\x2b\x3a\xc9\x38\xd3\xd4\x04\x65\x6f\xca\xfc\x2b\xbd\x06\x39\xd1\xd3\x1c\x68\xd9\x92\xb0\xd0\x61\x0b\xe7\x17\xb5\x6c\x3e\x78\x5d\x11\x43\xd9\xf6\xe2\x8c\xe1\xe9\x09\x56\x08\xd6\x96\x93\x07\x2d\xd0\x75\x06\x19\x27\x2b\x0f\x6d\xb3\x9c\x6c\xf3\x7d\x7d\x8c\x4c\xf3\x00\x38\x7f\x9e\x5f\x0f\x9f\xc1\x12\xe7\x86\xd5\x63\xb6\x7e\x74\x92\xd7\x08\x86\xce\x53\xd9\x7a\x02\x20\x78\x31\xc1\x2c\xd7\xff\x37\xa9\x73\xd9\xfa\x41\x18\xad\x69\xf2\x91\xff\x74\x75\xf1\x34\x07\xa1\xa6\x0f\x38\xdf\x56\xb7\x4c\x50\xff\x8c\x42\x0d\xf1\x69\x4a\xd4\xc4\x09\xef\x96\xc2\xa7\xbf\xb4\x10\xff\xbd\x4f\x63\x02\x57\x9a\x2f\x12\x3a\xa4\x22\xf6\xbb\xb8\x02\xbd\x37\x61\xe6\x3f\x34\x4f\x87\x66\x3e\x08\x08\xc4\x52\x41\x45\xc5\xa6\x1c\x4a\x6b\x41\x66\xd1\x3d\x4b\x31\x22\xa5\x5b\x04\x45\x69\xc9\x85\x9c\xe8\x69\x4d\xb6\x4a\xdb\x65\xfe\x4a\x35\x7c\x15\xf4\x8f\xb5\x9b\x71\x9b\xa5\x29\x22\x15\xc2\xb7\x98\x16\xe0\xec\xe0\x73\x49\xc4\x2d\xee\xbe\xa8\x99\x6a\xc1\x78\xf3\x06\x69\xdb\xb8\x19\x45\xf0\x11\xef\x6c\x6e\x1d\xdb\xfb\xd3\x5e\x44\x07\x2a\xc3\x65\x99\x9b\xdd\xc6\x1a\x3a\xe7\xfe\x80\x1b\x9b\xb9\x20\x77\x7c\xa1\x21\x8e\xbc\x86\x69\xfa\xfb\x13\xc1\x42\xcd\x09\x56\x1f\x69\x9f\x9c\xdd\x22\xe7\xd6\x3b\xce\x86\xd3\x10\xf3\x1d\x41\x4b\xae\xb4\x52\x55\x03\xdd\xf7\x10\x30\x68\xc7\xa6\x9e\x99\x27\xb8\xc7\xa0\xe6\x66\xad\x1f\x05\x86\x04\x3a\xce\x46\x2c\xb6\xfd\xd2\xf6\x6a\xad\xf6\x3d\x30\x7f\x84\x94\x81\x01\x35\x77\x10\x67\xc4\xee\xa2\x71\xc3\xf4\x87\x9a\xef\x65\xe1\x25\x91\xb2\xb7\xd2\x4e\x3b\xe6\xd4\x3c\x69\x0e\xf3\x86\xaf\xb2\x74\xdf\x99\x94\x29\xad\x58\xe7\x44\x61\x5a\xf4\xb9\x64\xcc\x91\x36\x68\xf2\x18\x1c\xe2\xb7\xbd\xcb\x14\x04\xcb\x3e\xf5\x61\xb3\xbc\xa3\xe4\xcc\x4c\x9f\x33\x32\xb9\xe3\x22\x47\xe7\xb8\x24\xc5\x39\x96\xc4\xc2\x09\xf3\x64\xcd\x9e\x1d\xf6\x2d\x61\x1f\x93\xdf\xb6\xe2\xf5\x4c\xde\x18\xb3\x1c\x79\x59\xfa\x68\x2e\x77\x66\xaa\xc7\x40\x3c\x7c\x81\x3e\x8a\x9a\xf4\x39\x1c\xde\x68\x19\x77\x8c\x3e\xb1\x1b\xc6\xef\x76\x9b\xb5\xea\x75\xbf\xb4\xa3\x0a\x6d\xc1\x2e\xa8\x15\x6b\xab\x84\xb5\x8c\x56\x7e\xe2\x23\x67\x61\x19\x7f\x87\xa9\xb4\x2d\x80\xfc\x63\x6e\x16\xfa\xbf\x5b\xa6\x33\x7d\xd9\x14\x7c\x29\x88\x34\x85\xac\xb6\xaa\xd4\xa6\xd8\xb5\xbf\x23\xcc\x26\xca\x0e\x4e\xe9\xaa\xeb\x0d\x37\x3b\x27\xcf\x96\xcd\x37\x3d\x6e\x6d\xfb\xc3\x55\xb1\xa5\x80\xf4\x07\x97\x06\x93\xed\x30\xa9\xf5\xcd\xb2\xdb\x94\x16\x48\xbf\xe0\xd9\x6d\x29\xa6\xd5\x96\x21\x1b\xb0\x5b\xf1\xf9\xec\x73\x37\xd2\x3b\x25\xe0\x90\xd4\x1a\x36\xbe\x3d\xc4\xec\x36\x78\x26\x06\x4d\x6d\xbb\x1b\xd9\xcc\x87\x9d\x87\x63\xb4\x79\xad\x6d\x44\xeb\x84\xf9\x08\x86\xb5\xfd\x9b\xd4\x1e\xc3\x98\xf6\x28\x66\xb4\x47
\x33\xa0\x3d\xc0\x74\x06\x06\xb2\xee\xd9\x8e\x37\x9a\xb5\x4d\x63\x9d\x50\x47\x99\xcb\x3a\x8d\x62\x9d\x60\xe3\x86\xb2\xc1\xd3\xda\x67\x1c\x7b\xfe\x66\xb1\xc1\x65\xf5\x99\xc2\x9e\x9d\x11\x2c\xba\x8a\x5e\xc3\xd7\xb3\x34\x79\x0d\x2e\x27\xc1\xcc\xf5\x30\x03\x57\x5f\x59\xf5\x9f\x83\x69\x6b\x10\x73\x3d\xe6\xac\x67\x66\xc8\x8a\x29\x87\x24\x8f\xa9\xa9\x57\xc1\x83\xa1\xa2\xda\x74\xa9\xf7\xa0\x5c\xb4\x51\xa8\x46\x8d\xd5\x56\xb5\xd2\x38\x38\x9f\x43\x3b\xa1\x64\xbd\xcf\x29\xac\x1d\x72\xd0\xdd\xa6\x5a\x13\x46\x17\x97\xd7\x1f\x2e\xcf\xcf\x3e\x5e\x5e\x6c\xea\x9a\x9b\x18\x1e\xd0\x02\xfb\xac\x20\x93\x40\x07\xdc\xfa\x4a\x33\xd6\xad\x0f\x3b\xc2\x1c\x27\xa8\xae\xb7\x3c\x92\xbb\xeb\x9d\x3b\x49\xa3\x9d\x78\xfd\xf0\x89\x4a\x39\x52\xfa\x44\xc1\xce\x9b\x50\x39\xad\xe7\xac\x78\x91\x4b\x1b\x3c\x8c\xae\x2e\x6c\x78\xfb\x31\xa2\x2c\x2b\xea\xbc\xcf\xfa\xf0\xe9\xd3\xd5\x85\x9c\x22\xf4\x2d\xc9\x70\x2d\xc1\xee\x93\x73\x76\xa8\xd0\xfb\x77\x3f\xfc\x1f\x08\xd5\x87\x27\x8e\x7d\x75\x22\x28\x55\x4e\x31\x68\x5e\x66\x05\xf0\x76\x1f\x7c\x3b\xc3\x0c\x57\x9a\x2b\x49\xd3\xde\x4a\x81\x4e\xb1\x22\x45\x25\x51\xa9\xf5\xc2\xa6\x9c\xb4\xfe\x31\xf8\xb6\x3f\xec\xdc\xc6\x86\x2e\x89\x32\x09\x88\x7d\xe1\x9f\xbd\x58\x1e\xb0\xfd\x3e\xc0\xea\x1b\x9e\x9e\xad\xd9\x58\x5b\xc0\x1d\x96\xd6\x9e\xb6\x35\xe3\x01\x9a\x18\xb6\x10\x75\x9b\x56\x7a\x8c\x2a\x86\x05\xc3\x5f\x5b\x57\x6c\x3d\xb9\xc6\x52\xd2\xc1\x27\x14\x87\x38\xa0\x04\x8b\x6e\x77\xe8\xd4\x76\x23\x98\x2d\xeb\x09\xd9\x44\x74\xeb\xc7\x7c\x79\xb5\x7e\x54\xb7\x7e\xff\xff\x07\x00\x00\xff\xff\xfa\xb4\x3c\xdd\x40\xa3\x01\x00") + +func operatorsCoreosCom_subscriptionsYamlBytes() ([]byte, error) { + return bindataRead( + _operatorsCoreosCom_subscriptionsYaml, + "operators.coreos.com_subscriptions.yaml", + ) +} + +func operatorsCoreosCom_subscriptionsYaml() (*asset, error) { + bytes, err := operatorsCoreosCom_subscriptionsYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "operators.coreos.com_subscriptions.yaml", size: 107328, mode: os.FileMode(420), modTime: time.Unix(1586375941, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. 
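As a usage note for the generated accessors above, here is a minimal sketch of how a consumer might load one of the embedded manifests. It assumes the generated file is compiled into this repository's crds package; the import path and the main wrapper are illustrative, not part of the diff:

package main

import (
	"fmt"

	"github.com/operator-framework/api/crds" // assumed import path for the generated bindata package
)

func main() {
	// Asset returns the embedded bytes for a known manifest name and an error
	// for unknown names; MustAsset panics instead of returning the error.
	data, err := crds.Asset("operators.coreos.com_subscriptions.yaml")
	if err != nil {
		panic(err)
	}
	fmt.Printf("subscription CRD manifest: %d bytes\n", len(data))

	// AssetNames (defined next) lists every manifest compiled into the binary.
	for _, name := range crds.AssetNames() {
		fmt.Println(name)
	}
}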
+func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "operators.coreos.com_catalogsources.yaml": operatorsCoreosCom_catalogsourcesYaml, + "operators.coreos.com_clusterserviceversions.yaml": operatorsCoreosCom_clusterserviceversionsYaml, + "operators.coreos.com_installplans.yaml": operatorsCoreosCom_installplansYaml, + "operators.coreos.com_operatorgroups.yaml": operatorsCoreosCom_operatorgroupsYaml, + "operators.coreos.com_operators.yaml": operatorsCoreosCom_operatorsYaml, + "operators.coreos.com_subscriptions.yaml": operatorsCoreosCom_subscriptionsYaml, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "operators.coreos.com_catalogsources.yaml": &bintree{operatorsCoreosCom_catalogsourcesYaml, map[string]*bintree{}}, + "operators.coreos.com_clusterserviceversions.yaml": &bintree{operatorsCoreosCom_clusterserviceversionsYaml, map[string]*bintree{}}, + "operators.coreos.com_installplans.yaml": &bintree{operatorsCoreosCom_installplansYaml, map[string]*bintree{}}, + "operators.coreos.com_operatorgroups.yaml": &bintree{operatorsCoreosCom_operatorgroupsYaml, map[string]*bintree{}}, + "operators.coreos.com_operators.yaml": &bintree{operatorsCoreosCom_operatorsYaml, map[string]*bintree{}}, + "operators.coreos.com_subscriptions.yaml": &bintree{operatorsCoreosCom_subscriptionsYaml, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = 
RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} diff --git a/go.mod b/go.mod index 6306c0461..e7b727c46 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,8 @@ go 1.13 require ( github.com/blang/semver v3.5.0+incompatible github.com/ghodss/yaml v1.0.0 + github.com/go-bindata/go-bindata/v3 v3.1.3 + github.com/mikefarah/yq/v2 v2.4.1 github.com/operator-framework/operator-registry v1.5.3 github.com/pkg/errors v0.8.1 github.com/sirupsen/logrus v1.4.2 @@ -14,4 +16,6 @@ require ( k8s.io/apiextensions-apiserver v0.17.3 k8s.io/apimachinery v0.17.3 k8s.io/client-go v0.17.3 + sigs.k8s.io/controller-runtime v0.5.2 + sigs.k8s.io/controller-tools v0.2.8 ) diff --git a/go.sum b/go.sum index 2966dc4f0..aa332641a 100644 --- a/go.sum +++ b/go.sum @@ -54,10 +54,8 @@ github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.15+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= @@ -65,7 +63,6 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -96,6 +93,7 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -106,6 +104,8 @@ github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkg github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -117,9 +117,12 @@ github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb h1:D4uzjWwKYQ5XnAvU github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-bindata/go-bindata/v3 v3.1.3 h1:F0nVttLC3ws0ojc7p60veTurcOm//D4QBODNM7EGrCI= +github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -187,6 +190,8 @@ github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85n github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gobuffalo/flect v0.2.0 h1:EWCvMGGxOjsgwlWaP+f4+Hh6yrrte7JeFL2S6b+0hdM= +github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0 h1:xU6/SpYbvkNYiptHJYEDRseDLvYE7wSqhYYNy0QSUzI= @@ -201,6 +206,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekf github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= github.com/golang/groupcache 
v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -233,6 +240,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -258,6 +267,7 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= @@ -277,6 +287,7 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0 h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= @@ -304,12 +315,20 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.4/go.mod 
h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mikefarah/yaml/v2 v2.4.0 h1:eYqfooY0BnvKTJxr7+ABJs13n3dg9n347GScDaU2Lww= +github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= +github.com/mikefarah/yq/v2 v2.4.1 h1:tajDonaFK6WqitSZExB6fKlWQy/yCkptqxh2AXEe3N4= +github.com/mikefarah/yq/v2 v2.4.1/go.mod h1:i8SYf1XdgUvY2OFwSqGAtWOOgimD2McJ6iutoxRm4k0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -333,11 +352,15 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= @@ -418,6 +441,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod 
h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= @@ -457,6 +481,8 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -465,6 +491,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -514,6 +542,7 @@ golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -521,10 +550,10 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= 
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69 h1:rOhMmluY6kLMhdnrivzec6lLgaVbMHMn2ISQXJeJ5EM= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -552,8 +581,15 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 h1:bw9doJza/SFBEweII/rHQh338oozWyiFsBRHtrflcws= golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0 h1:s5lp4ug7qHzUccgyFdjsX7OZDzHXRaePrF3B3vmUiuM= +golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= @@ -587,13 +623,19 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/imdario/mergo.v0 v0.3.7 h1:QDotlIZtaO/p+Um0ok18HRTpq5i5/SAk/qprsor+9c8= +gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE= +gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -603,44 +645,48 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966 h1:B0J02caTR6tpSJozBJyiAzT6CtBzjclw4pgm9gg8Ys0= +gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= -k8s.io/api v0.16.7 h1:pCzC0lCpriUQzAT/MLP3pjOrXnd005E+73oO2wFodS4= -k8s.io/api v0.16.7/go.mod h1:oUAiGRgo4t+5yqcxjOu5LoHT3wJ8JSbgczkaFYS5L7I= +k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= +k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= k8s.io/api v0.17.3 h1:XAm3PZp3wnEdzekNkcmj/9Y1zdmQYJ1I4GKSBBZ8aG0= k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0= k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY= -k8s.io/apiextensions-apiserver v0.16.7 h1:lp6iCIzGsYLnAmHKSqc6o3OyodCpsm+mhseowylxhPs= -k8s.io/apiextensions-apiserver v0.16.7/go.mod h1:6xYRp4trGp6eT5WZ6tPi/TB2nfWQCzwUvBlpg8iswe0= +k8s.io/apiextensions-apiserver v0.17.0/go.mod h1:XiIFUakZywkUl54fVXa7QTEHcqQz9HG55nHd1DCoHj8= +k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= k8s.io/apiextensions-apiserver v0.17.3 h1:WDZWkPcbgvchEdDd7ysL21GGPx3UKZQLDZXEkevT6n4= k8s.io/apiextensions-apiserver v0.17.3/go.mod h1:CJbCyMfkKftAd/X/V6OTHYhVn7zXnDdnkUjS1h0GTeY= k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= -k8s.io/apimachinery v0.16.7 h1:MWxTXXh1ianCotNCj4ehx8eu0UyvtJl4cvn6riSJymQ= -k8s.io/apimachinery v0.16.7/go.mod h1:Xk2vD2TRRpuWYLQNM6lT9R7DSFZUYG03SarNkbGrnKE= +k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= 
k8s.io/apimachinery v0.17.3 h1:f+uZV6rm4/tHE7xXgLyToprg6xWairaClGVkm2t8omg= k8s.io/apimachinery v0.17.3/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg= -k8s.io/apiserver v0.16.7 h1:qacdQPVDKmS2uzky6MR/19Vpw+Z0SXwJdH6Z+/Sy6uA= -k8s.io/apiserver v0.16.7/go.mod h1:/5zSatF30/L9zYfMTl55jzzOnx7r/gGv5a5wtRp8yAw= +k8s.io/apiserver v0.17.0/go.mod h1:ABM+9x/prjINN6iiffRVNCBR2Wk7uY4z+EtEGZD48cg= +k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= k8s.io/apiserver v0.17.3 h1:faZbSuFtJ4dx09vctKZGHms/7bp3qFtbqb10Swswqfs= k8s.io/apiserver v0.17.3/go.mod h1:iJtsPpu1ZpEnHaNawpSV0nYTGBhhX2dUlnn7/QS7QiY= k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= -k8s.io/client-go v0.16.7 h1:nipZSn8iGEsAhpVEx88EfOiSgKu5qPAPX1GMoRnRTB8= -k8s.io/client-go v0.16.7/go.mod h1:9kEMEeuy2LdsHHXoU2Skqh+SDso+Yhkxd/0tltvswDE= +k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= +k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= k8s.io/client-go v0.17.3 h1:deUna1Ksx05XeESH6XGCyONNFfiQmDdqeqUvicvP6nU= k8s.io/client-go v0.17.3/go.mod h1:cLXlTMtWHkuK4tD360KpWz2gG2KtdWEr/OT02i3emRQ= k8s.io/code-generator v0.0.0-20190912054826-cd179ad6a269/go.mod h1:V5BD6M4CyaN5m+VthcclXWsVcT1Hu+glwa1bi3MIsyE= -k8s.io/code-generator v0.16.7/go.mod h1:wFdrXdVi/UC+xIfLi+4l9elsTT/uEF61IfcN2wOLULQ= +k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= +k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.17.3/go.mod h1:l8BLVwASXQZTo2xamW5mQNFCe1XPiAesVq7Y1t7PiQQ= k8s.io/component-base v0.0.0-20190918160511-547f6c5d7090/go.mod h1:933PBGtQFJky3TEwYx4aEPZ4IxqhWh3R6DCmzqIn1hA= -k8s.io/component-base v0.16.7 h1:s2DqQ6jL11ms0Z8QC3iwn2qaAwtjEZk9K+RCkGDTa74= -k8s.io/component-base v0.16.7/go.mod h1:ikdyfezOFMu5O0qJjy/Y9eXwj+fV3pVwdmt0ulVcIR0= +k8s.io/component-base v0.17.0/go.mod h1:rKuRAokNMY2nn2A6LP/MiwpoaMRHpfRnrPaUJJj1Yoc= +k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= k8s.io/component-base v0.17.3 h1:hQzTSshY14aLSR6WGIYvmw+w+u6V4d+iDR2iDGMrlUg= k8s.io/component-base v0.17.3/go.mod h1:GeQf4BrgelWm64PXkIXiPh/XS0hnO42d9gx9BtbZRp8= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -665,9 +711,12 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +sigs.k8s.io/controller-runtime v0.5.2 h1:pyXbUfoTo+HA3jeIfr0vgi+1WtmNh0CwlcnQGLXwsSw= +sigs.k8s.io/controller-runtime v0.5.2/go.mod h1:JZUwSMVbxDupo0lTJSSFP5pimEyxGynROImSsqIOx1A= +sigs.k8s.io/controller-tools v0.2.8 h1:UmYsnu89dn8/wBhjKL3lkGyaDGRnPDYUx2+iwXRnylA= +sigs.k8s.io/controller-tools v0.2.8/go.mod h1:9VKHPszmf2DHz/QmHkcfZoewO6BL7pPs9uAiBVsaJSE= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod 
h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= -sigs.k8s.io/structured-merge-diff v1.0.2/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 000000000..9d2527f1c --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,14 @@ + /* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ diff --git a/pkg/operators/clusterserviceversion_types.go b/pkg/operators/clusterserviceversion_types.go index eb8fdd161..fded5c3cb 100644 --- a/pkg/operators/clusterserviceversion_types.go +++ b/pkg/operators/clusterserviceversion_types.go @@ -77,7 +77,7 @@ type StatusDescriptor struct { DisplayName string Description string XDescriptors []string - Value *json.RawMessage + Value json.RawMessage } // SpecDescriptor describes a field in a spec block of a CRD so that OLM can consume it @@ -86,7 +86,7 @@ type SpecDescriptor struct { DisplayName string Description string XDescriptors []string - Value *json.RawMessage + Value json.RawMessage } // ActionDescriptor describes a declarative action that can be performed on a custom resource instance @@ -95,7 +95,7 @@ type ActionDescriptor struct { DisplayName string Description string XDescriptors []string - Value *json.RawMessage + Value json.RawMessage } // CRDDescription provides details to OLM about the CRDs diff --git a/pkg/operators/v1/operatorgroup_types.go b/pkg/operators/v1/operatorgroup_types.go index f36b04804..ebc0c5931 100644 --- a/pkg/operators/v1/operatorgroup_types.go +++ b/pkg/operators/v1/operatorgroup_types.go @@ -1,6 +1,7 @@ package v1 import ( + "fmt" "sort" "strings" @@ -15,6 +16,9 @@ const ( OperatorGroupProvidedAPIsAnnotationKey = "olm.providedAPIs" OperatorGroupKind = "OperatorGroup" + + operatorGroupLabelPrefix = "olm.operatorgroup/" + operatorGroupLabelTemplate = operatorGroupLabelPrefix + "%s.%s" ) // OperatorGroupSpec is the spec for an OperatorGroup resource. @@ -95,3 +99,32 @@ func (o *OperatorGroup) HasServiceAccountSynced() bool { return false } + +// OGLabelKeyAndValue returns a key and value that should be applied to namespaces listed in the OperatorGroup. +func (o *OperatorGroup) OGLabelKeyAndValue() (string, string) { + return fmt.Sprintf(operatorGroupLabelTemplate, o.GetNamespace(), o.GetName()), "" +} + +// NamespaceLabelSelector provides a selector that can be used to filter namespaces that belong to the OperatorGroup. +func (o *OperatorGroup) NamespaceLabelSelector() *metav1.LabelSelector { + if len(o.Spec.TargetNamespaces) == 0 { + // If no target namespaces are set, check if a selector exists. + if o.Spec.Selector != nil { + return o.Spec.Selector + } + // No selector exists, return nil which should be used to select EVERYTHING. + return nil + } + // Return a label that should be present on all namespaces defined in the OperatorGroup.Spec.TargetNamespaces field. 
+ ogKey, ogValue := o.OGLabelKeyAndValue() + return &metav1.LabelSelector{ + MatchLabels: map[string]string{ + ogKey: ogValue, + }, + } +} + +// IsOperatorGroupLabel returns true if the label is an OperatorGroup label. +func IsOperatorGroupLabel(label string) bool { + return strings.HasPrefix(label, operatorGroupLabelPrefix) +} diff --git a/pkg/operators/v1/zz_generated.deepcopy.go b/pkg/operators/v1/zz_generated.deepcopy.go index fdb23f86e..7b56a2b46 100644 --- a/pkg/operators/v1/zz_generated.deepcopy.go +++ b/pkg/operators/v1/zz_generated.deepcopy.go @@ -1,7 +1,6 @@ // +build !ignore_autogenerated /* -Copyright 2020 Red Hat, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,14 +15,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package v1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -33,7 +32,6 @@ func (in *OperatorGroup) DeepCopyInto(out *OperatorGroup) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroup. @@ -66,7 +64,6 @@ func (in *OperatorGroupList) DeepCopyInto(out *OperatorGroupList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupList. @@ -100,7 +97,6 @@ func (in *OperatorGroupSpec) DeepCopyInto(out *OperatorGroupSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupSpec. @@ -130,7 +126,6 @@ func (in *OperatorGroupStatus) DeepCopyInto(out *OperatorGroupStatus) { in, out := &in.LastUpdated, &out.LastUpdated *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupStatus. diff --git a/pkg/operators/v1alpha1/catalogsource_types.go b/pkg/operators/v1alpha1/catalogsource_types.go index bb6f244a5..b89412dd3 100644 --- a/pkg/operators/v1alpha1/catalogsource_types.go +++ b/pkg/operators/v1alpha1/catalogsource_types.go @@ -29,7 +29,11 @@ const ( ) const ( - CatalogSourceConfigMapError ConditionReason = "ConfigMapError" + // CatalogSourceSpecInvalidError denotes when fields on the spec of the CatalogSource are not valid. + CatalogSourceSpecInvalidError ConditionReason = "SpecInvalidError" + // CatalogSourceConfigMapError denotes when there is an issue extracting manifests from the specified ConfigMap. + CatalogSourceConfigMapError ConditionReason = "ConfigMapError" + // CatalogSourceRegistryServerError denotes when there is an issue querying the specified registry server. CatalogSourceRegistryServerError ConditionReason = "RegistryServerError" ) @@ -104,10 +108,10 @@ type GRPCConnectionState struct { } type CatalogSourceStatus struct { - // A human readable message indicating details about why the ClusterServiceVersion is in this condition. + // A human readable message indicating details about why the CatalogSource is in this condition. 
// +optional Message string `json:"message,omitempty"` - // Reason is the reason the Subscription was transitioned to its current state. + // Reason is the reason the CatalogSource was transitioned to its current state. // +optional Reason ConditionReason `json:"reason,omitempty"` diff --git a/pkg/operators/v1alpha1/clusterserviceversion_types.go b/pkg/operators/v1alpha1/clusterserviceversion_types.go index 90e5516c1..6f3c1aa81 100644 --- a/pkg/operators/v1alpha1/clusterserviceversion_types.go +++ b/pkg/operators/v1alpha1/clusterserviceversion_types.go @@ -4,6 +4,9 @@ import ( "encoding/json" "fmt" "sort" + "strings" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" appsv1 "k8s.io/api/apps/v1" rbac "k8s.io/api/rbac/v1" @@ -77,31 +80,31 @@ func (d *StrategyDetailsDeployment) GetStrategyName() string { // StatusDescriptor describes a field in a status block of a CRD so that OLM can consume it // +k8s:openapi-gen=true type StatusDescriptor struct { - Path string `json:"path"` - DisplayName string `json:"displayName,omitempty"` - Description string `json:"description,omitempty"` - XDescriptors []string `json:"x-descriptors,omitempty"` - Value *json.RawMessage `json:"value,omitempty"` + Path string `json:"path"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + XDescriptors []string `json:"x-descriptors,omitempty"` + Value json.RawMessage `json:"value,omitempty"` } // SpecDescriptor describes a field in a spec block of a CRD so that OLM can consume it // +k8s:openapi-gen=true type SpecDescriptor struct { - Path string `json:"path"` - DisplayName string `json:"displayName,omitempty"` - Description string `json:"description,omitempty"` - XDescriptors []string `json:"x-descriptors,omitempty"` - Value *json.RawMessage `json:"value,omitempty"` + Path string `json:"path"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + XDescriptors []string `json:"x-descriptors,omitempty"` + Value json.RawMessage `json:"value,omitempty"` } // ActionDescriptor describes a declarative action that can be performed on a custom resource instance // +k8s:openapi-gen=true type ActionDescriptor struct { - Path string `json:"path"` - DisplayName string `json:"displayName,omitempty"` - Description string `json:"description,omitempty"` - XDescriptors []string `json:"x-descriptors,omitempty"` - Value *json.RawMessage `json:"value,omitempty"` + Path string `json:"path"` + DisplayName string `json:"displayName,omitempty"` + Description string `json:"description,omitempty"` + XDescriptors []string `json:"x-descriptors,omitempty"` + Value json.RawMessage `json:"value,omitempty"` } // CRDDescription provides details to OLM about the CRDs @@ -148,6 +151,87 @@ func (d APIServiceDescription) GetName() string { return fmt.Sprintf("%s.%s", d.Version, d.Group) } +// WebhookAdmissionType is the type of admission webhooks supported by OLM +type WebhookAdmissionType string + +const ( + // ValidatingAdmissionWebhook is for validating admission webhooks + ValidatingAdmissionWebhook WebhookAdmissionType = "ValidatingAdmissionWebhook" + // MutatingAdmissionWebhook is for mutating admission webhooks + MutatingAdmissionWebhook WebhookAdmissionType = "MutatingAdmissionWebhook" +) + +// WebhookDescription provides details to OLM about required webhooks +// +k8s:openapi-gen=true +type WebhookDescription struct { + Name string + Type WebhookAdmissionType + DeploymentName string + ContainerPort int32 + Rules 
[]admissionregistrationv1.RuleWithOperations + FailurePolicy *admissionregistrationv1.FailurePolicyType + MatchPolicy *admissionregistrationv1.MatchPolicyType + ObjectSelector *metav1.LabelSelector + SideEffects *admissionregistrationv1.SideEffectClass + TimeoutSeconds *int32 + AdmissionReviewVersions []string + ReinvocationPolicy *admissionregistrationv1.ReinvocationPolicyType + WebhookPath *string +} + +// GetValidatingWebhook returns a ValidatingWebhook generated from the WebhookDescription +func (w *WebhookDescription) GetValidatingWebhook(namespace string, namespaceSelector *metav1.LabelSelector, caBundle []byte) admissionregistrationv1.ValidatingWebhook { + return admissionregistrationv1.ValidatingWebhook{ + Name: w.Name, + Rules: w.Rules, + FailurePolicy: w.FailurePolicy, + MatchPolicy: w.MatchPolicy, + NamespaceSelector: namespaceSelector, + ObjectSelector: w.ObjectSelector, + SideEffects: w.SideEffects, + TimeoutSeconds: w.TimeoutSeconds, + AdmissionReviewVersions: w.AdmissionReviewVersions, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &admissionregistrationv1.ServiceReference{ + Name: w.DomainName() + "-service", + Namespace: namespace, + Path: w.WebhookPath, + }, + CABundle: caBundle, + }, + } +} + +// GetMutatingWebhook returns a MutatingWebhook generated from the WebhookDescription +func (w *WebhookDescription) GetMutatingWebhook(namespace string, namespaceSelector *metav1.LabelSelector, caBundle []byte) admissionregistrationv1.MutatingWebhook { + return admissionregistrationv1.MutatingWebhook{ + Name: w.Name, + Rules: w.Rules, + FailurePolicy: w.FailurePolicy, + MatchPolicy: w.MatchPolicy, + NamespaceSelector: namespaceSelector, + ObjectSelector: w.ObjectSelector, + SideEffects: w.SideEffects, + TimeoutSeconds: w.TimeoutSeconds, + AdmissionReviewVersions: w.AdmissionReviewVersions, + ClientConfig: admissionregistrationv1.WebhookClientConfig{ + Service: &admissionregistrationv1.ServiceReference{ + Name: w.DomainName() + "-service", + Namespace: namespace, + Path: w.WebhookPath, + }, + CABundle: caBundle, + }, + ReinvocationPolicy: w.ReinvocationPolicy, + } +} + +// DomainName returns the webhook's deployment name with all periods replaced by hyphens +func (w *WebhookDescription) DomainName() string { + // Replace all '.'s with "-"s to convert to a DNS-1035 label + return strings.Replace(w.DeploymentName, ".", "-", -1) +} + // CustomResourceDefinitions declares all of the CRDs managed or required by // an operator being run by ClusterServiceVersion. // @@ -174,6 +258,7 @@ type ClusterServiceVersionSpec struct { Maturity string `json:"maturity,omitempty"` CustomResourceDefinitions CustomResourceDefinitions `json:"customresourcedefinitions,omitempty"` APIServiceDefinitions APIServiceDefinitions `json:"apiservicedefinitions,omitempty"` + WebhookDefinitions []WebhookDescription `json:"webhookdefinitions,omitempty"` NativeAPIs []metav1.GroupVersionKind `json:"nativeAPIs,omitempty"` MinKubeVersion string `json:"minKubeVersion,omitempty"` DisplayName string `json:"displayName"` @@ -280,8 +365,18 @@ const ( CSVReasonInterOperatorGroupOwnerConflict ConditionReason = "InterOperatorGroupOwnerConflict" CSVReasonCannotModifyStaticOperatorGroupProvidedAPIs ConditionReason = "CannotModifyStaticOperatorGroupProvidedAPIs" CSVReasonDetectedClusterChange ConditionReason = "DetectedClusterChange" + CSVReasonUnsupportedWebhookRules ConditionReason = "UnsupportedWebhookRules" )
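Before the HasCAResources helper below, a hypothetical consumer-side sketch of the new webhook API: expanding a WebhookDescription into a ValidatingWebhook. All field values here (webhook name, deployment name, port, path) are invented for illustration and are not part of this diff:

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/operator-framework/api/pkg/operators/v1alpha1"
)

func main() {
	failurePolicy := admissionregistrationv1.Fail
	path := "/validate" // hypothetical webhook endpoint

	desc := v1alpha1.WebhookDescription{
		Name:           "example.my-operator.io", // hypothetical webhook name
		Type:           v1alpha1.ValidatingAdmissionWebhook,
		DeploymentName: "my-operator.v1.0.0", // hypothetical deployment name
		ContainerPort:  443,
		FailurePolicy:  &failurePolicy,
		WebhookPath:    &path,
	}

	// The generated client config points at a Service whose name is derived
	// from the deployment name via DomainName():
	// "my-operator.v1.0.0" becomes "my-operator-v1-0-0-service".
	webhook := desc.GetValidatingWebhook("operators", &metav1.LabelSelector{}, nil)
	fmt.Println(webhook.ClientConfig.Service.Name)
}

Note that DomainName derives the Service name from DeploymentName, not from the webhook's Name, so the generated Service tracks the deployment that serves the webhook.

+// HasCAResources returns true if the CSV has owned APIServices or Webhooks.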
+func (c *ClusterServiceVersion) HasCAResources() bool { + // Return early if there are no owned APIServices + if len(c.Spec.APIServiceDefinitions.Owned)+len(c.Spec.WebhookDefinitions) == 0 { + return false + } + return true +} + // Conditions appear in the status as a record of state transitions on the ClusterServiceVersion type ClusterServiceVersionCondition struct { // Condition of the ClusterServiceVersion diff --git a/pkg/operators/v1alpha1/installplan_types.go b/pkg/operators/v1alpha1/installplan_types.go index 05522337d..10e082eb8 100644 --- a/pkg/operators/v1alpha1/installplan_types.go +++ b/pkg/operators/v1alpha1/installplan_types.go @@ -28,6 +28,7 @@ type InstallPlanSpec struct { ClusterServiceVersionNames []string `json:"clusterServiceVersionNames"` Approval Approval `json:"approval"` Approved bool `json:"approved"` + Generation int `json:"generation"` } // InstallPlanPhase is the current status of a InstallPlan as a whole. @@ -251,6 +252,8 @@ type BundleLookup struct { // Path refers to the location of a bundle to pull. // It's typically an image reference. Path string `json:"path"` + // Identifier is the catalog-unique name of the operator (the name of the CSV for bundles that contain CSVs) + Identifier string `json:"identifier"` // Replaces is the name of the bundle to replace with the one found at Path. Replaces string `json:"replaces"` // CatalogSourceRef is a reference to the CatalogSource the bundle path was resolved from. @@ -305,39 +308,6 @@ func (b *BundleLookup) SetCondition(cond BundleLookupCondition) BundleLookupCond return cond } -// ManifestsMatch returns true if the CSV manifests in the StepResources of the given list of steps -// matches those in the InstallPlanStatus. -func (s *InstallPlanStatus) CSVManifestsMatch(steps []*Step) bool { - if s.Plan == nil && steps == nil { - return true - } - if s.Plan == nil || steps == nil { - return false - } - - manifests := make(map[string]struct{}) - for _, step := range s.Plan { - resource := step.Resource - if resource.Kind != ClusterServiceVersionKind { - continue - } - manifests[resource.Manifest] = struct{}{} - } - - for _, step := range steps { - resource := step.Resource - if resource.Kind != ClusterServiceVersionKind { - continue - } - if _, ok := manifests[resource.Manifest]; !ok { - return false - } - delete(manifests, resource.Manifest) - } - - return len(manifests) == 0 -} - func (s *Step) String() string { return fmt.Sprintf("%s: %s (%s)", s.Resolving, s.Resource, s.Status) } diff --git a/pkg/operators/v1alpha1/subscription_types.go b/pkg/operators/v1alpha1/subscription_types.go index 9e997bb61..b80334e17 100644 --- a/pkg/operators/v1alpha1/subscription_types.go +++ b/pkg/operators/v1alpha1/subscription_types.go @@ -186,6 +186,10 @@ type SubscriptionStatus struct { // +optional Reason ConditionReason `json:"reason,omitempty"` + // InstallPlanGeneration is the current generation of the installplan + // +optional + InstallPlanGeneration int `json:"installPlanGeneration,omitempty"` + // InstallPlanRef is a reference to the latest InstallPlan that contains the Subscription's current CSV. // +optional InstallPlanRef *corev1.ObjectReference `json:"installPlanRef,omitempty"` diff --git a/pkg/operators/v1alpha1/zz_generated.conversion.go b/pkg/operators/v1alpha1/zz_generated.conversion.go deleted file mode 100644 index c954ba4a7..000000000 --- a/pkg/operators/v1alpha1/zz_generated.conversion.go +++ /dev/null @@ -1,1960 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2020 Red Hat, Inc. 
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	json "encoding/json"
-	unsafe "unsafe"
-
-	operators "github.com/operator-framework/api/pkg/operators"
-	v1 "k8s.io/api/core/v1"
-	rbacv1 "k8s.io/api/rbac/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	conversion "k8s.io/apimachinery/pkg/conversion"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	types "k8s.io/apimachinery/pkg/types"
-)
-
-func init() {
-	localSchemeBuilder.Register(RegisterConversions)
-}
-
-// RegisterConversions adds conversion functions to the given scheme.
-// Public to allow building arbitrary schemes.
-func RegisterConversions(s *runtime.Scheme) error {
-	if err := s.AddGeneratedConversionFunc((*APIResourceReference)(nil), (*operators.APIResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_APIResourceReference_To_operators_APIResourceReference(a.(*APIResourceReference), b.(*operators.APIResourceReference), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.APIResourceReference)(nil), (*APIResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_APIResourceReference_To_v1alpha1_APIResourceReference(a.(*operators.APIResourceReference), b.(*APIResourceReference), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*APIServiceDefinitions)(nil), (*operators.APIServiceDefinitions)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_APIServiceDefinitions_To_operators_APIServiceDefinitions(a.(*APIServiceDefinitions), b.(*operators.APIServiceDefinitions), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.APIServiceDefinitions)(nil), (*APIServiceDefinitions)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_APIServiceDefinitions_To_v1alpha1_APIServiceDefinitions(a.(*operators.APIServiceDefinitions), b.(*APIServiceDefinitions), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*APIServiceDescription)(nil), (*operators.APIServiceDescription)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_APIServiceDescription_To_operators_APIServiceDescription(a.(*APIServiceDescription), b.(*operators.APIServiceDescription), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.APIServiceDescription)(nil), (*APIServiceDescription)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_APIServiceDescription_To_v1alpha1_APIServiceDescription(a.(*operators.APIServiceDescription), b.(*APIServiceDescription), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*ActionDescriptor)(nil), (*operators.ActionDescriptor)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_ActionDescriptor_To_operators_ActionDescriptor(a.(*ActionDescriptor), b.(*operators.ActionDescriptor), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.ActionDescriptor)(nil), (*ActionDescriptor)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_ActionDescriptor_To_v1alpha1_ActionDescriptor(a.(*operators.ActionDescriptor), b.(*ActionDescriptor), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*AppLink)(nil), (*operators.AppLink)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_AppLink_To_operators_AppLink(a.(*AppLink), b.(*operators.AppLink), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.AppLink)(nil), (*AppLink)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_AppLink_To_v1alpha1_AppLink(a.(*operators.AppLink), b.(*AppLink), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*BundleLookup)(nil), (*operators.BundleLookup)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_BundleLookup_To_operators_BundleLookup(a.(*BundleLookup), b.(*operators.BundleLookup), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.BundleLookup)(nil), (*BundleLookup)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_BundleLookup_To_v1alpha1_BundleLookup(a.(*operators.BundleLookup), b.(*BundleLookup), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*BundleLookupCondition)(nil), (*operators.BundleLookupCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_BundleLookupCondition_To_operators_BundleLookupCondition(a.(*BundleLookupCondition), b.(*operators.BundleLookupCondition), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.BundleLookupCondition)(nil), (*BundleLookupCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_BundleLookupCondition_To_v1alpha1_BundleLookupCondition(a.(*operators.BundleLookupCondition), b.(*BundleLookupCondition), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*CRDDescription)(nil), (*operators.CRDDescription)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_CRDDescription_To_operators_CRDDescription(a.(*CRDDescription), b.(*operators.CRDDescription), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.CRDDescription)(nil), (*CRDDescription)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_CRDDescription_To_v1alpha1_CRDDescription(a.(*operators.CRDDescription), b.(*CRDDescription), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*CatalogSource)(nil), (*operators.CatalogSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_CatalogSource_To_operators_CatalogSource(a.(*CatalogSource), b.(*operators.CatalogSource), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.CatalogSource)(nil), (*CatalogSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_CatalogSource_To_v1alpha1_CatalogSource(a.(*operators.CatalogSource), b.(*CatalogSource), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*CatalogSourceList)(nil), (*operators.CatalogSourceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_CatalogSourceList_To_operators_CatalogSourceList(a.(*CatalogSourceList), b.(*operators.CatalogSourceList), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.CatalogSourceList)(nil), (*CatalogSourceList)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_CatalogSourceList_To_v1alpha1_CatalogSourceList(a.(*operators.CatalogSourceList), b.(*CatalogSourceList), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*CatalogSourceSpec)(nil), (*operators.CatalogSourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_CatalogSourceSpec_To_operators_CatalogSourceSpec(a.(*CatalogSourceSpec), b.(*operators.CatalogSourceSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.CatalogSourceSpec)(nil), (*CatalogSourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_CatalogSourceSpec_To_v1alpha1_CatalogSourceSpec(a.(*operators.CatalogSourceSpec), b.(*CatalogSourceSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*CatalogSourceStatus)(nil), (*operators.CatalogSourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_CatalogSourceStatus_To_operators_CatalogSourceStatus(a.(*CatalogSourceStatus), b.(*operators.CatalogSourceStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.CatalogSourceStatus)(nil), (*CatalogSourceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_CatalogSourceStatus_To_v1alpha1_CatalogSourceStatus(a.(*operators.CatalogSourceStatus), b.(*CatalogSourceStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*ClusterServiceVersion)(nil), (*operators.ClusterServiceVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_ClusterServiceVersion_To_operators_ClusterServiceVersion(a.(*ClusterServiceVersion), b.(*operators.ClusterServiceVersion), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.ClusterServiceVersion)(nil), (*ClusterServiceVersion)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_ClusterServiceVersion_To_v1alpha1_ClusterServiceVersion(a.(*operators.ClusterServiceVersion), b.(*ClusterServiceVersion), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*ClusterServiceVersionCondition)(nil), (*operators.ClusterServiceVersionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_ClusterServiceVersionCondition_To_operators_ClusterServiceVersionCondition(a.(*ClusterServiceVersionCondition), b.(*operators.ClusterServiceVersionCondition), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.ClusterServiceVersionCondition)(nil), (*ClusterServiceVersionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_ClusterServiceVersionCondition_To_v1alpha1_ClusterServiceVersionCondition(a.(*operators.ClusterServiceVersionCondition), b.(*ClusterServiceVersionCondition), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*ClusterServiceVersionList)(nil), (*operators.ClusterServiceVersionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_ClusterServiceVersionList_To_operators_ClusterServiceVersionList(a.(*ClusterServiceVersionList), b.(*operators.ClusterServiceVersionList), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.ClusterServiceVersionList)(nil), (*ClusterServiceVersionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_ClusterServiceVersionList_To_v1alpha1_ClusterServiceVersionList(a.(*operators.ClusterServiceVersionList), b.(*ClusterServiceVersionList), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*ClusterServiceVersionSpec)(nil), (*operators.ClusterServiceVersionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_ClusterServiceVersionSpec_To_operators_ClusterServiceVersionSpec(a.(*ClusterServiceVersionSpec), b.(*operators.ClusterServiceVersionSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.ClusterServiceVersionSpec)(nil), (*ClusterServiceVersionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_ClusterServiceVersionSpec_To_v1alpha1_ClusterServiceVersionSpec(a.(*operators.ClusterServiceVersionSpec), b.(*ClusterServiceVersionSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*ClusterServiceVersionStatus)(nil), (*operators.ClusterServiceVersionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_ClusterServiceVersionStatus_To_operators_ClusterServiceVersionStatus(a.(*ClusterServiceVersionStatus), b.(*operators.ClusterServiceVersionStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.ClusterServiceVersionStatus)(nil), (*ClusterServiceVersionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_ClusterServiceVersionStatus_To_v1alpha1_ClusterServiceVersionStatus(a.(*operators.ClusterServiceVersionStatus), b.(*ClusterServiceVersionStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*ConfigMapResourceReference)(nil), (*operators.ConfigMapResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_ConfigMapResourceReference_To_operators_ConfigMapResourceReference(a.(*ConfigMapResourceReference), b.(*operators.ConfigMapResourceReference), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.ConfigMapResourceReference)(nil), (*ConfigMapResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_ConfigMapResourceReference_To_v1alpha1_ConfigMapResourceReference(a.(*operators.ConfigMapResourceReference), b.(*ConfigMapResourceReference), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*CustomResourceDefinitions)(nil), (*operators.CustomResourceDefinitions)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_CustomResourceDefinitions_To_operators_CustomResourceDefinitions(a.(*CustomResourceDefinitions), b.(*operators.CustomResourceDefinitions), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.CustomResourceDefinitions)(nil), (*CustomResourceDefinitions)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_CustomResourceDefinitions_To_v1alpha1_CustomResourceDefinitions(a.(*operators.CustomResourceDefinitions), b.(*CustomResourceDefinitions), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*DependentStatus)(nil), (*operators.DependentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_DependentStatus_To_operators_DependentStatus(a.(*DependentStatus), b.(*operators.DependentStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.DependentStatus)(nil), (*DependentStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_DependentStatus_To_v1alpha1_DependentStatus(a.(*operators.DependentStatus), b.(*DependentStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*GRPCConnectionState)(nil), (*operators.GRPCConnectionState)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_GRPCConnectionState_To_operators_GRPCConnectionState(a.(*GRPCConnectionState), b.(*operators.GRPCConnectionState), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.GRPCConnectionState)(nil), (*GRPCConnectionState)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_GRPCConnectionState_To_v1alpha1_GRPCConnectionState(a.(*operators.GRPCConnectionState), b.(*GRPCConnectionState), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*Icon)(nil), (*operators.Icon)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_Icon_To_operators_Icon(a.(*Icon), b.(*operators.Icon), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.Icon)(nil), (*Icon)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_Icon_To_v1alpha1_Icon(a.(*operators.Icon), b.(*Icon), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*InstallMode)(nil), (*operators.InstallMode)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_InstallMode_To_operators_InstallMode(a.(*InstallMode), b.(*operators.InstallMode), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.InstallMode)(nil), (*InstallMode)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_InstallMode_To_v1alpha1_InstallMode(a.(*operators.InstallMode), b.(*InstallMode), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*InstallPlan)(nil), (*operators.InstallPlan)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_InstallPlan_To_operators_InstallPlan(a.(*InstallPlan), b.(*operators.InstallPlan), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.InstallPlan)(nil), (*InstallPlan)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_InstallPlan_To_v1alpha1_InstallPlan(a.(*operators.InstallPlan), b.(*InstallPlan), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*InstallPlanCondition)(nil), (*operators.InstallPlanCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_InstallPlanCondition_To_operators_InstallPlanCondition(a.(*InstallPlanCondition), b.(*operators.InstallPlanCondition), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.InstallPlanCondition)(nil), (*InstallPlanCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_InstallPlanCondition_To_v1alpha1_InstallPlanCondition(a.(*operators.InstallPlanCondition), b.(*InstallPlanCondition), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*InstallPlanList)(nil), (*operators.InstallPlanList)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_InstallPlanList_To_operators_InstallPlanList(a.(*InstallPlanList), b.(*operators.InstallPlanList), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.InstallPlanList)(nil), (*InstallPlanList)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_InstallPlanList_To_v1alpha1_InstallPlanList(a.(*operators.InstallPlanList), b.(*InstallPlanList), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*InstallPlanReference)(nil), (*operators.InstallPlanReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_InstallPlanReference_To_operators_InstallPlanReference(a.(*InstallPlanReference), b.(*operators.InstallPlanReference), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.InstallPlanReference)(nil), (*InstallPlanReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_InstallPlanReference_To_v1alpha1_InstallPlanReference(a.(*operators.InstallPlanReference), b.(*InstallPlanReference), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*InstallPlanSpec)(nil), (*operators.InstallPlanSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_InstallPlanSpec_To_operators_InstallPlanSpec(a.(*InstallPlanSpec), b.(*operators.InstallPlanSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.InstallPlanSpec)(nil), (*InstallPlanSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_InstallPlanSpec_To_v1alpha1_InstallPlanSpec(a.(*operators.InstallPlanSpec), b.(*InstallPlanSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*InstallPlanStatus)(nil), (*operators.InstallPlanStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_InstallPlanStatus_To_operators_InstallPlanStatus(a.(*InstallPlanStatus), b.(*operators.InstallPlanStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.InstallPlanStatus)(nil), (*InstallPlanStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_InstallPlanStatus_To_v1alpha1_InstallPlanStatus(a.(*operators.InstallPlanStatus), b.(*InstallPlanStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*Maintainer)(nil), (*operators.Maintainer)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_Maintainer_To_operators_Maintainer(a.(*Maintainer), b.(*operators.Maintainer), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.Maintainer)(nil), (*Maintainer)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_Maintainer_To_v1alpha1_Maintainer(a.(*operators.Maintainer), b.(*Maintainer), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*NamedInstallStrategy)(nil), (*operators.NamedInstallStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_NamedInstallStrategy_To_operators_NamedInstallStrategy(a.(*NamedInstallStrategy), b.(*operators.NamedInstallStrategy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.NamedInstallStrategy)(nil), (*NamedInstallStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_NamedInstallStrategy_To_v1alpha1_NamedInstallStrategy(a.(*operators.NamedInstallStrategy), b.(*NamedInstallStrategy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*RegistryPoll)(nil), (*operators.RegistryPoll)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_RegistryPoll_To_operators_RegistryPoll(a.(*RegistryPoll), b.(*operators.RegistryPoll), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.RegistryPoll)(nil), (*RegistryPoll)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_RegistryPoll_To_v1alpha1_RegistryPoll(a.(*operators.RegistryPoll), b.(*RegistryPoll), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*RegistryServiceStatus)(nil), (*operators.RegistryServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_RegistryServiceStatus_To_operators_RegistryServiceStatus(a.(*RegistryServiceStatus), b.(*operators.RegistryServiceStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.RegistryServiceStatus)(nil), (*RegistryServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_RegistryServiceStatus_To_v1alpha1_RegistryServiceStatus(a.(*operators.RegistryServiceStatus), b.(*RegistryServiceStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*RequirementStatus)(nil), (*operators.RequirementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_RequirementStatus_To_operators_RequirementStatus(a.(*RequirementStatus), b.(*operators.RequirementStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.RequirementStatus)(nil), (*RequirementStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_RequirementStatus_To_v1alpha1_RequirementStatus(a.(*operators.RequirementStatus), b.(*RequirementStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*SpecDescriptor)(nil), (*operators.SpecDescriptor)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_SpecDescriptor_To_operators_SpecDescriptor(a.(*SpecDescriptor), b.(*operators.SpecDescriptor), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.SpecDescriptor)(nil), (*SpecDescriptor)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_SpecDescriptor_To_v1alpha1_SpecDescriptor(a.(*operators.SpecDescriptor), b.(*SpecDescriptor), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*StatusDescriptor)(nil), (*operators.StatusDescriptor)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_StatusDescriptor_To_operators_StatusDescriptor(a.(*StatusDescriptor), b.(*operators.StatusDescriptor), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.StatusDescriptor)(nil), (*StatusDescriptor)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_StatusDescriptor_To_v1alpha1_StatusDescriptor(a.(*operators.StatusDescriptor), b.(*StatusDescriptor), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*Step)(nil), (*operators.Step)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_Step_To_operators_Step(a.(*Step), b.(*operators.Step), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.Step)(nil), (*Step)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_Step_To_v1alpha1_Step(a.(*operators.Step), b.(*Step), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*StepResource)(nil), (*operators.StepResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_StepResource_To_operators_StepResource(a.(*StepResource), b.(*operators.StepResource), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.StepResource)(nil), (*StepResource)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_StepResource_To_v1alpha1_StepResource(a.(*operators.StepResource), b.(*StepResource), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*StrategyDeploymentPermissions)(nil), (*operators.StrategyDeploymentPermissions)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_StrategyDeploymentPermissions_To_operators_StrategyDeploymentPermissions(a.(*StrategyDeploymentPermissions), b.(*operators.StrategyDeploymentPermissions), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.StrategyDeploymentPermissions)(nil), (*StrategyDeploymentPermissions)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_StrategyDeploymentPermissions_To_v1alpha1_StrategyDeploymentPermissions(a.(*operators.StrategyDeploymentPermissions), b.(*StrategyDeploymentPermissions), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*StrategyDeploymentSpec)(nil), (*operators.StrategyDeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_StrategyDeploymentSpec_To_operators_StrategyDeploymentSpec(a.(*StrategyDeploymentSpec), b.(*operators.StrategyDeploymentSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.StrategyDeploymentSpec)(nil), (*StrategyDeploymentSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_StrategyDeploymentSpec_To_v1alpha1_StrategyDeploymentSpec(a.(*operators.StrategyDeploymentSpec), b.(*StrategyDeploymentSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*StrategyDetailsDeployment)(nil), (*operators.StrategyDetailsDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_StrategyDetailsDeployment_To_operators_StrategyDetailsDeployment(a.(*StrategyDetailsDeployment), b.(*operators.StrategyDetailsDeployment), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.StrategyDetailsDeployment)(nil), (*StrategyDetailsDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_StrategyDetailsDeployment_To_v1alpha1_StrategyDetailsDeployment(a.(*operators.StrategyDetailsDeployment), b.(*StrategyDetailsDeployment), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*Subscription)(nil), (*operators.Subscription)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_Subscription_To_operators_Subscription(a.(*Subscription), b.(*operators.Subscription), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.Subscription)(nil), (*Subscription)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_Subscription_To_v1alpha1_Subscription(a.(*operators.Subscription), b.(*Subscription), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*SubscriptionCatalogHealth)(nil), (*operators.SubscriptionCatalogHealth)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_SubscriptionCatalogHealth_To_operators_SubscriptionCatalogHealth(a.(*SubscriptionCatalogHealth), b.(*operators.SubscriptionCatalogHealth), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.SubscriptionCatalogHealth)(nil), (*SubscriptionCatalogHealth)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_SubscriptionCatalogHealth_To_v1alpha1_SubscriptionCatalogHealth(a.(*operators.SubscriptionCatalogHealth), b.(*SubscriptionCatalogHealth), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*SubscriptionCondition)(nil), (*operators.SubscriptionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_SubscriptionCondition_To_operators_SubscriptionCondition(a.(*SubscriptionCondition), b.(*operators.SubscriptionCondition), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.SubscriptionCondition)(nil), (*SubscriptionCondition)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_SubscriptionCondition_To_v1alpha1_SubscriptionCondition(a.(*operators.SubscriptionCondition), b.(*SubscriptionCondition), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*SubscriptionConfig)(nil), (*operators.SubscriptionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_SubscriptionConfig_To_operators_SubscriptionConfig(a.(*SubscriptionConfig), b.(*operators.SubscriptionConfig), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.SubscriptionConfig)(nil), (*SubscriptionConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_SubscriptionConfig_To_v1alpha1_SubscriptionConfig(a.(*operators.SubscriptionConfig), b.(*SubscriptionConfig), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*SubscriptionList)(nil), (*operators.SubscriptionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_SubscriptionList_To_operators_SubscriptionList(a.(*SubscriptionList), b.(*operators.SubscriptionList), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.SubscriptionList)(nil), (*SubscriptionList)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_SubscriptionList_To_v1alpha1_SubscriptionList(a.(*operators.SubscriptionList), b.(*SubscriptionList), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*SubscriptionSpec)(nil), (*operators.SubscriptionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_SubscriptionSpec_To_operators_SubscriptionSpec(a.(*SubscriptionSpec), b.(*operators.SubscriptionSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.SubscriptionSpec)(nil), (*SubscriptionSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_SubscriptionSpec_To_v1alpha1_SubscriptionSpec(a.(*operators.SubscriptionSpec), b.(*SubscriptionSpec), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*SubscriptionStatus)(nil), (*operators.SubscriptionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_SubscriptionStatus_To_operators_SubscriptionStatus(a.(*SubscriptionStatus), b.(*operators.SubscriptionStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.SubscriptionStatus)(nil), (*SubscriptionStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_SubscriptionStatus_To_v1alpha1_SubscriptionStatus(a.(*operators.SubscriptionStatus), b.(*SubscriptionStatus), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*UpdateStrategy)(nil), (*operators.UpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_v1alpha1_UpdateStrategy_To_operators_UpdateStrategy(a.(*UpdateStrategy), b.(*operators.UpdateStrategy), scope)
-	}); err != nil {
-		return err
-	}
-	if err := s.AddGeneratedConversionFunc((*operators.UpdateStrategy)(nil), (*UpdateStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error {
-		return Convert_operators_UpdateStrategy_To_v1alpha1_UpdateStrategy(a.(*operators.UpdateStrategy), b.(*UpdateStrategy), scope)
-	}); err != nil {
-		return err
-	}
-	return nil
-}
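Reviewer note: before this removal, the registrations above were never called directly; they were dispatched through a runtime.Scheme. A minimal sketch of that path (illustrative only, not part of this diff; it assumes the pre-removal package API, and the CatalogSource field values are made up):

	package main

	import (
		"fmt"

		operators "github.com/operator-framework/api/pkg/operators"
		"github.com/operator-framework/api/pkg/operators/v1alpha1"
		"k8s.io/apimachinery/pkg/runtime"
	)

	func main() {
		s := runtime.NewScheme()
		// RegisterConversions is exported precisely so arbitrary schemes can be built.
		if err := v1alpha1.RegisterConversions(s); err != nil {
			panic(err)
		}

		in := &v1alpha1.CatalogSource{Spec: v1alpha1.CatalogSourceSpec{DisplayName: "example"}}
		out := &operators.CatalogSource{}
		// Convert looks up the function registered for this (in, out) type pair.
		if err := s.Convert(in, out, nil); err != nil {
			panic(err)
		}
		fmt.Println(out.Spec.DisplayName) // example
	}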
-
-func autoConvert_v1alpha1_APIResourceReference_To_operators_APIResourceReference(in *APIResourceReference, out *operators.APIResourceReference, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Kind = in.Kind
-	out.Version = in.Version
-	return nil
-}
-
-// Convert_v1alpha1_APIResourceReference_To_operators_APIResourceReference is an autogenerated conversion function.
-func Convert_v1alpha1_APIResourceReference_To_operators_APIResourceReference(in *APIResourceReference, out *operators.APIResourceReference, s conversion.Scope) error {
-	return autoConvert_v1alpha1_APIResourceReference_To_operators_APIResourceReference(in, out, s)
-}
-
-func autoConvert_operators_APIResourceReference_To_v1alpha1_APIResourceReference(in *operators.APIResourceReference, out *APIResourceReference, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Kind = in.Kind
-	out.Version = in.Version
-	return nil
-}
-
-// Convert_operators_APIResourceReference_To_v1alpha1_APIResourceReference is an autogenerated conversion function.
-func Convert_operators_APIResourceReference_To_v1alpha1_APIResourceReference(in *operators.APIResourceReference, out *APIResourceReference, s conversion.Scope) error {
-	return autoConvert_operators_APIResourceReference_To_v1alpha1_APIResourceReference(in, out, s)
-}
-
-func autoConvert_v1alpha1_APIServiceDefinitions_To_operators_APIServiceDefinitions(in *APIServiceDefinitions, out *operators.APIServiceDefinitions, s conversion.Scope) error {
-	out.Owned = *(*[]operators.APIServiceDescription)(unsafe.Pointer(&in.Owned))
-	out.Required = *(*[]operators.APIServiceDescription)(unsafe.Pointer(&in.Required))
-	return nil
-}
-
-// Convert_v1alpha1_APIServiceDefinitions_To_operators_APIServiceDefinitions is an autogenerated conversion function.
-func Convert_v1alpha1_APIServiceDefinitions_To_operators_APIServiceDefinitions(in *APIServiceDefinitions, out *operators.APIServiceDefinitions, s conversion.Scope) error {
-	return autoConvert_v1alpha1_APIServiceDefinitions_To_operators_APIServiceDefinitions(in, out, s)
-}
-
-func autoConvert_operators_APIServiceDefinitions_To_v1alpha1_APIServiceDefinitions(in *operators.APIServiceDefinitions, out *APIServiceDefinitions, s conversion.Scope) error {
-	out.Owned = *(*[]APIServiceDescription)(unsafe.Pointer(&in.Owned))
-	out.Required = *(*[]APIServiceDescription)(unsafe.Pointer(&in.Required))
-	return nil
-}
-
-// Convert_operators_APIServiceDefinitions_To_v1alpha1_APIServiceDefinitions is an autogenerated conversion function.
-func Convert_operators_APIServiceDefinitions_To_v1alpha1_APIServiceDefinitions(in *operators.APIServiceDefinitions, out *APIServiceDefinitions, s conversion.Scope) error {
-	return autoConvert_operators_APIServiceDefinitions_To_v1alpha1_APIServiceDefinitions(in, out, s)
-}
-
-func autoConvert_v1alpha1_APIServiceDescription_To_operators_APIServiceDescription(in *APIServiceDescription, out *operators.APIServiceDescription, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Group = in.Group
-	out.Version = in.Version
-	out.Kind = in.Kind
-	out.DeploymentName = in.DeploymentName
-	out.ContainerPort = in.ContainerPort
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.Resources = *(*[]operators.APIResourceReference)(unsafe.Pointer(&in.Resources))
-	out.StatusDescriptors = *(*[]operators.StatusDescriptor)(unsafe.Pointer(&in.StatusDescriptors))
-	out.SpecDescriptors = *(*[]operators.SpecDescriptor)(unsafe.Pointer(&in.SpecDescriptors))
-	out.ActionDescriptor = *(*[]operators.ActionDescriptor)(unsafe.Pointer(&in.ActionDescriptor))
-	return nil
-}
-
-// Convert_v1alpha1_APIServiceDescription_To_operators_APIServiceDescription is an autogenerated conversion function.
-func Convert_v1alpha1_APIServiceDescription_To_operators_APIServiceDescription(in *APIServiceDescription, out *operators.APIServiceDescription, s conversion.Scope) error {
-	return autoConvert_v1alpha1_APIServiceDescription_To_operators_APIServiceDescription(in, out, s)
-}
-
-func autoConvert_operators_APIServiceDescription_To_v1alpha1_APIServiceDescription(in *operators.APIServiceDescription, out *APIServiceDescription, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Group = in.Group
-	out.Version = in.Version
-	out.Kind = in.Kind
-	out.DeploymentName = in.DeploymentName
-	out.ContainerPort = in.ContainerPort
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.Resources = *(*[]APIResourceReference)(unsafe.Pointer(&in.Resources))
-	out.StatusDescriptors = *(*[]StatusDescriptor)(unsafe.Pointer(&in.StatusDescriptors))
-	out.SpecDescriptors = *(*[]SpecDescriptor)(unsafe.Pointer(&in.SpecDescriptors))
-	out.ActionDescriptor = *(*[]ActionDescriptor)(unsafe.Pointer(&in.ActionDescriptor))
-	return nil
-}
-
-// Convert_operators_APIServiceDescription_To_v1alpha1_APIServiceDescription is an autogenerated conversion function.
-func Convert_operators_APIServiceDescription_To_v1alpha1_APIServiceDescription(in *operators.APIServiceDescription, out *APIServiceDescription, s conversion.Scope) error {
-	return autoConvert_operators_APIServiceDescription_To_v1alpha1_APIServiceDescription(in, out, s)
-}
-
-func autoConvert_v1alpha1_ActionDescriptor_To_operators_ActionDescriptor(in *ActionDescriptor, out *operators.ActionDescriptor, s conversion.Scope) error {
-	out.Path = in.Path
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.XDescriptors = *(*[]string)(unsafe.Pointer(&in.XDescriptors))
-	out.Value = (*json.RawMessage)(unsafe.Pointer(in.Value))
-	return nil
-}
-
-// Convert_v1alpha1_ActionDescriptor_To_operators_ActionDescriptor is an autogenerated conversion function.
-func Convert_v1alpha1_ActionDescriptor_To_operators_ActionDescriptor(in *ActionDescriptor, out *operators.ActionDescriptor, s conversion.Scope) error {
-	return autoConvert_v1alpha1_ActionDescriptor_To_operators_ActionDescriptor(in, out, s)
-}
-
-func autoConvert_operators_ActionDescriptor_To_v1alpha1_ActionDescriptor(in *operators.ActionDescriptor, out *ActionDescriptor, s conversion.Scope) error {
-	out.Path = in.Path
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.XDescriptors = *(*[]string)(unsafe.Pointer(&in.XDescriptors))
-	out.Value = (*json.RawMessage)(unsafe.Pointer(in.Value))
-	return nil
-}
-
-// Convert_operators_ActionDescriptor_To_v1alpha1_ActionDescriptor is an autogenerated conversion function.
-func Convert_operators_ActionDescriptor_To_v1alpha1_ActionDescriptor(in *operators.ActionDescriptor, out *ActionDescriptor, s conversion.Scope) error {
-	return autoConvert_operators_ActionDescriptor_To_v1alpha1_ActionDescriptor(in, out, s)
-}
-
-func autoConvert_v1alpha1_AppLink_To_operators_AppLink(in *AppLink, out *operators.AppLink, s conversion.Scope) error {
-	out.Name = in.Name
-	out.URL = in.URL
-	return nil
-}
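Reviewer note: the slice assignments in these converters (for example out.Resources = *(*[]operators.APIResourceReference)(unsafe.Pointer(&in.Resources))) are only emitted by conversion-gen when it has verified that the external and internal element types share an identical memory layout, so the slice header can be reinterpreted instead of copying each element. A toy demonstration of the same pattern, with hypothetical stand-in types (not from this repository):

	package main

	import (
		"fmt"
		"unsafe"
	)

	// Stand-ins for the external (v1alpha1) and internal versions of one struct;
	// the field names, types, and order match exactly, so the layouts are identical.
	type externalRef struct{ Name, Kind, Version string }
	type internalRef struct{ Name, Kind, Version string }

	func main() {
		in := []externalRef{{Name: "a", Kind: "A", Version: "v1"}}

		// Reinterpret the slice header in place: no allocation, no element copies.
		out := *(*[]internalRef)(unsafe.Pointer(&in))
		fmt.Println(out[0].Kind) // A

		// Caveat: out aliases in's backing array, so writes are visible through both.
		out[0].Name = "b"
		fmt.Println(in[0].Name) // b
	}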
-
-// Convert_v1alpha1_AppLink_To_operators_AppLink is an autogenerated conversion function.
-func Convert_v1alpha1_AppLink_To_operators_AppLink(in *AppLink, out *operators.AppLink, s conversion.Scope) error {
-	return autoConvert_v1alpha1_AppLink_To_operators_AppLink(in, out, s)
-}
-
-func autoConvert_operators_AppLink_To_v1alpha1_AppLink(in *operators.AppLink, out *AppLink, s conversion.Scope) error {
-	out.Name = in.Name
-	out.URL = in.URL
-	return nil
-}
-
-// Convert_operators_AppLink_To_v1alpha1_AppLink is an autogenerated conversion function.
-func Convert_operators_AppLink_To_v1alpha1_AppLink(in *operators.AppLink, out *AppLink, s conversion.Scope) error {
-	return autoConvert_operators_AppLink_To_v1alpha1_AppLink(in, out, s)
-}
-
-func autoConvert_v1alpha1_BundleLookup_To_operators_BundleLookup(in *BundleLookup, out *operators.BundleLookup, s conversion.Scope) error {
-	out.Path = in.Path
-	out.Replaces = in.Replaces
-	out.CatalogSourceRef = (*v1.ObjectReference)(unsafe.Pointer(in.CatalogSourceRef))
-	out.Conditions = *(*[]operators.BundleLookupCondition)(unsafe.Pointer(&in.Conditions))
-	return nil
-}
-
-// Convert_v1alpha1_BundleLookup_To_operators_BundleLookup is an autogenerated conversion function.
-func Convert_v1alpha1_BundleLookup_To_operators_BundleLookup(in *BundleLookup, out *operators.BundleLookup, s conversion.Scope) error {
-	return autoConvert_v1alpha1_BundleLookup_To_operators_BundleLookup(in, out, s)
-}
-
-func autoConvert_operators_BundleLookup_To_v1alpha1_BundleLookup(in *operators.BundleLookup, out *BundleLookup, s conversion.Scope) error {
-	out.Path = in.Path
-	out.Replaces = in.Replaces
-	out.CatalogSourceRef = (*v1.ObjectReference)(unsafe.Pointer(in.CatalogSourceRef))
-	out.Conditions = *(*[]BundleLookupCondition)(unsafe.Pointer(&in.Conditions))
-	return nil
-}
-
-// Convert_operators_BundleLookup_To_v1alpha1_BundleLookup is an autogenerated conversion function.
-func Convert_operators_BundleLookup_To_v1alpha1_BundleLookup(in *operators.BundleLookup, out *BundleLookup, s conversion.Scope) error {
-	return autoConvert_operators_BundleLookup_To_v1alpha1_BundleLookup(in, out, s)
-}
-
-func autoConvert_v1alpha1_BundleLookupCondition_To_operators_BundleLookupCondition(in *BundleLookupCondition, out *operators.BundleLookupCondition, s conversion.Scope) error {
-	out.Type = operators.BundleLookupConditionType(in.Type)
-	out.Status = v1.ConditionStatus(in.Status)
-	out.Reason = in.Reason
-	out.Message = in.Message
-	out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
-	out.LastTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastTransitionTime))
-	return nil
-}
-
-// Convert_v1alpha1_BundleLookupCondition_To_operators_BundleLookupCondition is an autogenerated conversion function.
-func Convert_v1alpha1_BundleLookupCondition_To_operators_BundleLookupCondition(in *BundleLookupCondition, out *operators.BundleLookupCondition, s conversion.Scope) error {
-	return autoConvert_v1alpha1_BundleLookupCondition_To_operators_BundleLookupCondition(in, out, s)
-}
-
-func autoConvert_operators_BundleLookupCondition_To_v1alpha1_BundleLookupCondition(in *operators.BundleLookupCondition, out *BundleLookupCondition, s conversion.Scope) error {
-	out.Type = BundleLookupConditionType(in.Type)
-	out.Status = v1.ConditionStatus(in.Status)
-	out.Reason = in.Reason
-	out.Message = in.Message
-	out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
-	out.LastTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastTransitionTime))
-	return nil
-}
-
-// Convert_operators_BundleLookupCondition_To_v1alpha1_BundleLookupCondition is an autogenerated conversion function.
-func Convert_operators_BundleLookupCondition_To_v1alpha1_BundleLookupCondition(in *operators.BundleLookupCondition, out *BundleLookupCondition, s conversion.Scope) error {
-	return autoConvert_operators_BundleLookupCondition_To_v1alpha1_BundleLookupCondition(in, out, s)
-}
-
-func autoConvert_v1alpha1_CRDDescription_To_operators_CRDDescription(in *CRDDescription, out *operators.CRDDescription, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Version = in.Version
-	out.Kind = in.Kind
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.Resources = *(*[]operators.APIResourceReference)(unsafe.Pointer(&in.Resources))
-	out.StatusDescriptors = *(*[]operators.StatusDescriptor)(unsafe.Pointer(&in.StatusDescriptors))
-	out.SpecDescriptors = *(*[]operators.SpecDescriptor)(unsafe.Pointer(&in.SpecDescriptors))
-	out.ActionDescriptor = *(*[]operators.ActionDescriptor)(unsafe.Pointer(&in.ActionDescriptor))
-	return nil
-}
-
-// Convert_v1alpha1_CRDDescription_To_operators_CRDDescription is an autogenerated conversion function.
-func Convert_v1alpha1_CRDDescription_To_operators_CRDDescription(in *CRDDescription, out *operators.CRDDescription, s conversion.Scope) error {
-	return autoConvert_v1alpha1_CRDDescription_To_operators_CRDDescription(in, out, s)
-}
-
-func autoConvert_operators_CRDDescription_To_v1alpha1_CRDDescription(in *operators.CRDDescription, out *CRDDescription, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Version = in.Version
-	out.Kind = in.Kind
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.Resources = *(*[]APIResourceReference)(unsafe.Pointer(&in.Resources))
-	out.StatusDescriptors = *(*[]StatusDescriptor)(unsafe.Pointer(&in.StatusDescriptors))
-	out.SpecDescriptors = *(*[]SpecDescriptor)(unsafe.Pointer(&in.SpecDescriptors))
-	out.ActionDescriptor = *(*[]ActionDescriptor)(unsafe.Pointer(&in.ActionDescriptor))
-	return nil
-}
-
-// Convert_operators_CRDDescription_To_v1alpha1_CRDDescription is an autogenerated conversion function.
-func Convert_operators_CRDDescription_To_v1alpha1_CRDDescription(in *operators.CRDDescription, out *CRDDescription, s conversion.Scope) error {
-	return autoConvert_operators_CRDDescription_To_v1alpha1_CRDDescription(in, out, s)
-}
-
-func autoConvert_v1alpha1_CatalogSource_To_operators_CatalogSource(in *CatalogSource, out *operators.CatalogSource, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	if err := Convert_v1alpha1_CatalogSourceSpec_To_operators_CatalogSourceSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_v1alpha1_CatalogSourceStatus_To_operators_CatalogSourceStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1alpha1_CatalogSource_To_operators_CatalogSource is an autogenerated conversion function.
-func Convert_v1alpha1_CatalogSource_To_operators_CatalogSource(in *CatalogSource, out *operators.CatalogSource, s conversion.Scope) error {
-	return autoConvert_v1alpha1_CatalogSource_To_operators_CatalogSource(in, out, s)
-}
-
-func autoConvert_operators_CatalogSource_To_v1alpha1_CatalogSource(in *operators.CatalogSource, out *CatalogSource, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	if err := Convert_operators_CatalogSourceSpec_To_v1alpha1_CatalogSourceSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_operators_CatalogSourceStatus_To_v1alpha1_CatalogSourceStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_operators_CatalogSource_To_v1alpha1_CatalogSource is an autogenerated conversion function.
-func Convert_operators_CatalogSource_To_v1alpha1_CatalogSource(in *operators.CatalogSource, out *CatalogSource, s conversion.Scope) error {
-	return autoConvert_operators_CatalogSource_To_v1alpha1_CatalogSource(in, out, s)
-}
-
-func autoConvert_v1alpha1_CatalogSourceList_To_operators_CatalogSourceList(in *CatalogSourceList, out *operators.CatalogSourceList, s conversion.Scope) error {
-	out.ListMeta = in.ListMeta
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]operators.CatalogSource, len(*in))
-		for i := range *in {
-			if err := Convert_v1alpha1_CatalogSource_To_operators_CatalogSource(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-// Convert_v1alpha1_CatalogSourceList_To_operators_CatalogSourceList is an autogenerated conversion function.
-func Convert_v1alpha1_CatalogSourceList_To_operators_CatalogSourceList(in *CatalogSourceList, out *operators.CatalogSourceList, s conversion.Scope) error {
-	return autoConvert_v1alpha1_CatalogSourceList_To_operators_CatalogSourceList(in, out, s)
-}
-
-func autoConvert_operators_CatalogSourceList_To_v1alpha1_CatalogSourceList(in *operators.CatalogSourceList, out *CatalogSourceList, s conversion.Scope) error {
-	out.ListMeta = in.ListMeta
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]CatalogSource, len(*in))
-		for i := range *in {
-			if err := Convert_operators_CatalogSource_To_v1alpha1_CatalogSource(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Items = nil
-	}
-	return nil
-}
-
-// Convert_operators_CatalogSourceList_To_v1alpha1_CatalogSourceList is an autogenerated conversion function.
-func Convert_operators_CatalogSourceList_To_v1alpha1_CatalogSourceList(in *operators.CatalogSourceList, out *CatalogSourceList, s conversion.Scope) error {
-	return autoConvert_operators_CatalogSourceList_To_v1alpha1_CatalogSourceList(in, out, s)
-}
-
-func autoConvert_v1alpha1_CatalogSourceSpec_To_operators_CatalogSourceSpec(in *CatalogSourceSpec, out *operators.CatalogSourceSpec, s conversion.Scope) error {
-	out.SourceType = operators.SourceType(in.SourceType)
-	out.ConfigMap = in.ConfigMap
-	out.Address = in.Address
-	out.Image = in.Image
-	out.UpdateStrategy = (*operators.UpdateStrategy)(unsafe.Pointer(in.UpdateStrategy))
-	out.Secrets = *(*[]string)(unsafe.Pointer(&in.Secrets))
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.Publisher = in.Publisher
-	if err := Convert_v1alpha1_Icon_To_operators_Icon(&in.Icon, &out.Icon, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1alpha1_CatalogSourceSpec_To_operators_CatalogSourceSpec is an autogenerated conversion function.
-func Convert_v1alpha1_CatalogSourceSpec_To_operators_CatalogSourceSpec(in *CatalogSourceSpec, out *operators.CatalogSourceSpec, s conversion.Scope) error {
-	return autoConvert_v1alpha1_CatalogSourceSpec_To_operators_CatalogSourceSpec(in, out, s)
-}
-
-func autoConvert_operators_CatalogSourceSpec_To_v1alpha1_CatalogSourceSpec(in *operators.CatalogSourceSpec, out *CatalogSourceSpec, s conversion.Scope) error {
-	out.SourceType = SourceType(in.SourceType)
-	out.ConfigMap = in.ConfigMap
-	out.Address = in.Address
-	out.Image = in.Image
-	out.UpdateStrategy = (*UpdateStrategy)(unsafe.Pointer(in.UpdateStrategy))
-	out.Secrets = *(*[]string)(unsafe.Pointer(&in.Secrets))
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.Publisher = in.Publisher
-	if err := Convert_operators_Icon_To_v1alpha1_Icon(&in.Icon, &out.Icon, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_operators_CatalogSourceSpec_To_v1alpha1_CatalogSourceSpec is an autogenerated conversion function.
-func Convert_operators_CatalogSourceSpec_To_v1alpha1_CatalogSourceSpec(in *operators.CatalogSourceSpec, out *CatalogSourceSpec, s conversion.Scope) error {
-	return autoConvert_operators_CatalogSourceSpec_To_v1alpha1_CatalogSourceSpec(in, out, s)
-}
-
-func autoConvert_v1alpha1_CatalogSourceStatus_To_operators_CatalogSourceStatus(in *CatalogSourceStatus, out *operators.CatalogSourceStatus, s conversion.Scope) error {
-	out.Message = in.Message
-	out.Reason = operators.ConditionReason(in.Reason)
-	out.LatestImageRegistryPoll = (*metav1.Time)(unsafe.Pointer(in.LatestImageRegistryPoll))
-	out.ConfigMapResource = (*operators.ConfigMapResourceReference)(unsafe.Pointer(in.ConfigMapResource))
-	out.RegistryServiceStatus = (*operators.RegistryServiceStatus)(unsafe.Pointer(in.RegistryServiceStatus))
-	out.GRPCConnectionState = (*operators.GRPCConnectionState)(unsafe.Pointer(in.GRPCConnectionState))
-	return nil
-}
-
-// Convert_v1alpha1_CatalogSourceStatus_To_operators_CatalogSourceStatus is an autogenerated conversion function.
-func Convert_v1alpha1_CatalogSourceStatus_To_operators_CatalogSourceStatus(in *CatalogSourceStatus, out *operators.CatalogSourceStatus, s conversion.Scope) error {
-	return autoConvert_v1alpha1_CatalogSourceStatus_To_operators_CatalogSourceStatus(in, out, s)
-}
-
-func autoConvert_operators_CatalogSourceStatus_To_v1alpha1_CatalogSourceStatus(in *operators.CatalogSourceStatus, out *CatalogSourceStatus, s conversion.Scope) error {
-	out.Message = in.Message
-	out.Reason = ConditionReason(in.Reason)
-	out.ConfigMapResource = (*ConfigMapResourceReference)(unsafe.Pointer(in.ConfigMapResource))
-	out.RegistryServiceStatus = (*RegistryServiceStatus)(unsafe.Pointer(in.RegistryServiceStatus))
-	out.GRPCConnectionState = (*GRPCConnectionState)(unsafe.Pointer(in.GRPCConnectionState))
-	out.LatestImageRegistryPoll = (*metav1.Time)(unsafe.Pointer(in.LatestImageRegistryPoll))
-	return nil
-}
-
-// Convert_operators_CatalogSourceStatus_To_v1alpha1_CatalogSourceStatus is an autogenerated conversion function.
-func Convert_operators_CatalogSourceStatus_To_v1alpha1_CatalogSourceStatus(in *operators.CatalogSourceStatus, out *CatalogSourceStatus, s conversion.Scope) error {
-	return autoConvert_operators_CatalogSourceStatus_To_v1alpha1_CatalogSourceStatus(in, out, s)
-}
-
-func autoConvert_v1alpha1_ClusterServiceVersion_To_operators_ClusterServiceVersion(in *ClusterServiceVersion, out *operators.ClusterServiceVersion, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	if err := Convert_v1alpha1_ClusterServiceVersionSpec_To_operators_ClusterServiceVersionSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_v1alpha1_ClusterServiceVersionStatus_To_operators_ClusterServiceVersionStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1alpha1_ClusterServiceVersion_To_operators_ClusterServiceVersion is an autogenerated conversion function.
-func Convert_v1alpha1_ClusterServiceVersion_To_operators_ClusterServiceVersion(in *ClusterServiceVersion, out *operators.ClusterServiceVersion, s conversion.Scope) error {
-	return autoConvert_v1alpha1_ClusterServiceVersion_To_operators_ClusterServiceVersion(in, out, s)
-}
-
-func autoConvert_operators_ClusterServiceVersion_To_v1alpha1_ClusterServiceVersion(in *operators.ClusterServiceVersion, out *ClusterServiceVersion, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	if err := Convert_operators_ClusterServiceVersionSpec_To_v1alpha1_ClusterServiceVersionSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_operators_ClusterServiceVersionStatus_To_v1alpha1_ClusterServiceVersionStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_operators_ClusterServiceVersion_To_v1alpha1_ClusterServiceVersion is an autogenerated conversion function.
-func Convert_operators_ClusterServiceVersion_To_v1alpha1_ClusterServiceVersion(in *operators.ClusterServiceVersion, out *ClusterServiceVersion, s conversion.Scope) error {
-	return autoConvert_operators_ClusterServiceVersion_To_v1alpha1_ClusterServiceVersion(in, out, s)
-}
-
-func autoConvert_v1alpha1_ClusterServiceVersionCondition_To_operators_ClusterServiceVersionCondition(in *ClusterServiceVersionCondition, out *operators.ClusterServiceVersionCondition, s conversion.Scope) error {
-	out.Phase = operators.ClusterServiceVersionPhase(in.Phase)
-	out.Message = in.Message
-	out.Reason = operators.ConditionReason(in.Reason)
-	out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
-	out.LastTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastTransitionTime))
-	return nil
-}
-
-// Convert_v1alpha1_ClusterServiceVersionCondition_To_operators_ClusterServiceVersionCondition is an autogenerated conversion function.
-func Convert_v1alpha1_ClusterServiceVersionCondition_To_operators_ClusterServiceVersionCondition(in *ClusterServiceVersionCondition, out *operators.ClusterServiceVersionCondition, s conversion.Scope) error {
-	return autoConvert_v1alpha1_ClusterServiceVersionCondition_To_operators_ClusterServiceVersionCondition(in, out, s)
-}
-
-func autoConvert_operators_ClusterServiceVersionCondition_To_v1alpha1_ClusterServiceVersionCondition(in *operators.ClusterServiceVersionCondition, out *ClusterServiceVersionCondition, s conversion.Scope) error {
-	out.Phase = ClusterServiceVersionPhase(in.Phase)
-	out.Message = in.Message
-	out.Reason = ConditionReason(in.Reason)
-	out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
-	out.LastTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastTransitionTime))
-	return nil
-}
-
-// Convert_operators_ClusterServiceVersionCondition_To_v1alpha1_ClusterServiceVersionCondition is an autogenerated conversion function.
-func Convert_operators_ClusterServiceVersionCondition_To_v1alpha1_ClusterServiceVersionCondition(in *operators.ClusterServiceVersionCondition, out *ClusterServiceVersionCondition, s conversion.Scope) error {
-	return autoConvert_operators_ClusterServiceVersionCondition_To_v1alpha1_ClusterServiceVersionCondition(in, out, s)
-}
-
-func autoConvert_v1alpha1_ClusterServiceVersionList_To_operators_ClusterServiceVersionList(in *ClusterServiceVersionList, out *operators.ClusterServiceVersionList, s conversion.Scope) error {
-	out.ListMeta = in.ListMeta
-	out.Items = *(*[]operators.ClusterServiceVersion)(unsafe.Pointer(&in.Items))
-	return nil
-}
-
-// Convert_v1alpha1_ClusterServiceVersionList_To_operators_ClusterServiceVersionList is an autogenerated conversion function.
-func Convert_v1alpha1_ClusterServiceVersionList_To_operators_ClusterServiceVersionList(in *ClusterServiceVersionList, out *operators.ClusterServiceVersionList, s conversion.Scope) error {
-	return autoConvert_v1alpha1_ClusterServiceVersionList_To_operators_ClusterServiceVersionList(in, out, s)
-}
-
-func autoConvert_operators_ClusterServiceVersionList_To_v1alpha1_ClusterServiceVersionList(in *operators.ClusterServiceVersionList, out *ClusterServiceVersionList, s conversion.Scope) error {
-	out.ListMeta = in.ListMeta
-	out.Items = *(*[]ClusterServiceVersion)(unsafe.Pointer(&in.Items))
-	return nil
-}
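Reviewer note: the two list strategies sit side by side in this file. CatalogSourceList (earlier) converts Items element by element because the internal and external CatalogSource layouts differ (CatalogSourceStatus declares LatestImageRegistryPoll at different positions in the two versions), while the ClusterServiceVersion layouts match field for field, so its Items slice is reinterpreted in one cast. Schematically (hypothetical names; convertItem stands for a generated per-element Convert function):

	// Emitted when the element conversion may run arbitrary code:
	out.Items = make([]internalItem, len(in.Items))
	for i := range in.Items {
		if err := convertItem(&in.Items[i], &out.Items[i], s); err != nil {
			return err
		}
	}

	// Emitted only when the element layouts are provably identical:
	out.Items = *(*[]internalItem)(unsafe.Pointer(&in.Items))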
-
-// Convert_operators_ClusterServiceVersionList_To_v1alpha1_ClusterServiceVersionList is an autogenerated conversion function.
-func Convert_operators_ClusterServiceVersionList_To_v1alpha1_ClusterServiceVersionList(in *operators.ClusterServiceVersionList, out *ClusterServiceVersionList, s conversion.Scope) error {
-	return autoConvert_operators_ClusterServiceVersionList_To_v1alpha1_ClusterServiceVersionList(in, out, s)
-}
-
-func autoConvert_v1alpha1_ClusterServiceVersionSpec_To_operators_ClusterServiceVersionSpec(in *ClusterServiceVersionSpec, out *operators.ClusterServiceVersionSpec, s conversion.Scope) error {
-	if err := Convert_v1alpha1_NamedInstallStrategy_To_operators_NamedInstallStrategy(&in.InstallStrategy, &out.InstallStrategy, s); err != nil {
-		return err
-	}
-	out.Version = in.Version
-	out.Maturity = in.Maturity
-	if err := Convert_v1alpha1_CustomResourceDefinitions_To_operators_CustomResourceDefinitions(&in.CustomResourceDefinitions, &out.CustomResourceDefinitions, s); err != nil {
-		return err
-	}
-	if err := Convert_v1alpha1_APIServiceDefinitions_To_operators_APIServiceDefinitions(&in.APIServiceDefinitions, &out.APIServiceDefinitions, s); err != nil {
-		return err
-	}
-	out.NativeAPIs = *(*[]metav1.GroupVersionKind)(unsafe.Pointer(&in.NativeAPIs))
-	out.MinKubeVersion = in.MinKubeVersion
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.Keywords = *(*[]string)(unsafe.Pointer(&in.Keywords))
-	out.Maintainers = *(*[]operators.Maintainer)(unsafe.Pointer(&in.Maintainers))
-	if err := Convert_v1alpha1_AppLink_To_operators_AppLink(&in.Provider, &out.Provider, s); err != nil {
-		return err
-	}
-	out.Links = *(*[]operators.AppLink)(unsafe.Pointer(&in.Links))
-	out.Icon = *(*[]operators.Icon)(unsafe.Pointer(&in.Icon))
-	out.InstallModes = *(*[]operators.InstallMode)(unsafe.Pointer(&in.InstallModes))
-	out.Replaces = in.Replaces
-	out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
-	out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
-	out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
-	return nil
-}
-
-// Convert_v1alpha1_ClusterServiceVersionSpec_To_operators_ClusterServiceVersionSpec is an autogenerated conversion function.
-func Convert_v1alpha1_ClusterServiceVersionSpec_To_operators_ClusterServiceVersionSpec(in *ClusterServiceVersionSpec, out *operators.ClusterServiceVersionSpec, s conversion.Scope) error {
-	return autoConvert_v1alpha1_ClusterServiceVersionSpec_To_operators_ClusterServiceVersionSpec(in, out, s)
-}
-
-func autoConvert_operators_ClusterServiceVersionSpec_To_v1alpha1_ClusterServiceVersionSpec(in *operators.ClusterServiceVersionSpec, out *ClusterServiceVersionSpec, s conversion.Scope) error {
-	if err := Convert_operators_NamedInstallStrategy_To_v1alpha1_NamedInstallStrategy(&in.InstallStrategy, &out.InstallStrategy, s); err != nil {
-		return err
-	}
-	out.Version = in.Version
-	out.Maturity = in.Maturity
-	if err := Convert_operators_CustomResourceDefinitions_To_v1alpha1_CustomResourceDefinitions(&in.CustomResourceDefinitions, &out.CustomResourceDefinitions, s); err != nil {
-		return err
-	}
-	if err := Convert_operators_APIServiceDefinitions_To_v1alpha1_APIServiceDefinitions(&in.APIServiceDefinitions, &out.APIServiceDefinitions, s); err != nil {
-		return err
-	}
-	out.NativeAPIs = *(*[]metav1.GroupVersionKind)(unsafe.Pointer(&in.NativeAPIs))
-	out.MinKubeVersion = in.MinKubeVersion
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.Keywords = *(*[]string)(unsafe.Pointer(&in.Keywords))
-	out.Maintainers = *(*[]Maintainer)(unsafe.Pointer(&in.Maintainers))
-	if err := Convert_operators_AppLink_To_v1alpha1_AppLink(&in.Provider, &out.Provider, s); err != nil {
-		return err
-	}
-	out.Links = *(*[]AppLink)(unsafe.Pointer(&in.Links))
-	out.Icon = *(*[]Icon)(unsafe.Pointer(&in.Icon))
-	out.InstallModes = *(*[]InstallMode)(unsafe.Pointer(&in.InstallModes))
-	out.Replaces = in.Replaces
-	out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels))
-	out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations))
-	out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
-	return nil
-}
-
-// Convert_operators_ClusterServiceVersionSpec_To_v1alpha1_ClusterServiceVersionSpec is an autogenerated conversion function.
-func Convert_operators_ClusterServiceVersionSpec_To_v1alpha1_ClusterServiceVersionSpec(in *operators.ClusterServiceVersionSpec, out *ClusterServiceVersionSpec, s conversion.Scope) error {
-	return autoConvert_operators_ClusterServiceVersionSpec_To_v1alpha1_ClusterServiceVersionSpec(in, out, s)
-}
-
-func autoConvert_v1alpha1_ClusterServiceVersionStatus_To_operators_ClusterServiceVersionStatus(in *ClusterServiceVersionStatus, out *operators.ClusterServiceVersionStatus, s conversion.Scope) error {
-	out.Phase = operators.ClusterServiceVersionPhase(in.Phase)
-	out.Message = in.Message
-	out.Reason = operators.ConditionReason(in.Reason)
-	out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
-	out.LastTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastTransitionTime))
-	out.Conditions = *(*[]operators.ClusterServiceVersionCondition)(unsafe.Pointer(&in.Conditions))
-	out.RequirementStatus = *(*[]operators.RequirementStatus)(unsafe.Pointer(&in.RequirementStatus))
-	out.CertsLastUpdated = (*metav1.Time)(unsafe.Pointer(in.CertsLastUpdated))
-	out.CertsRotateAt = (*metav1.Time)(unsafe.Pointer(in.CertsRotateAt))
-	return nil
-}
-
-// Convert_v1alpha1_ClusterServiceVersionStatus_To_operators_ClusterServiceVersionStatus is an autogenerated conversion function.
-func Convert_v1alpha1_ClusterServiceVersionStatus_To_operators_ClusterServiceVersionStatus(in *ClusterServiceVersionStatus, out *operators.ClusterServiceVersionStatus, s conversion.Scope) error {
-	return autoConvert_v1alpha1_ClusterServiceVersionStatus_To_operators_ClusterServiceVersionStatus(in, out, s)
-}
-
-func autoConvert_operators_ClusterServiceVersionStatus_To_v1alpha1_ClusterServiceVersionStatus(in *operators.ClusterServiceVersionStatus, out *ClusterServiceVersionStatus, s conversion.Scope) error {
-	out.Phase = ClusterServiceVersionPhase(in.Phase)
-	out.Message = in.Message
-	out.Reason = ConditionReason(in.Reason)
-	out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
-	out.LastTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastTransitionTime))
-	out.Conditions = *(*[]ClusterServiceVersionCondition)(unsafe.Pointer(&in.Conditions))
-	out.RequirementStatus = *(*[]RequirementStatus)(unsafe.Pointer(&in.RequirementStatus))
-	out.CertsLastUpdated = (*metav1.Time)(unsafe.Pointer(in.CertsLastUpdated))
-	out.CertsRotateAt = (*metav1.Time)(unsafe.Pointer(in.CertsRotateAt))
-	return nil
-}
-
-// Convert_operators_ClusterServiceVersionStatus_To_v1alpha1_ClusterServiceVersionStatus is an autogenerated conversion function.
-func Convert_operators_ClusterServiceVersionStatus_To_v1alpha1_ClusterServiceVersionStatus(in *operators.ClusterServiceVersionStatus, out *ClusterServiceVersionStatus, s conversion.Scope) error {
-	return autoConvert_operators_ClusterServiceVersionStatus_To_v1alpha1_ClusterServiceVersionStatus(in, out, s)
-}
-
-func autoConvert_v1alpha1_ConfigMapResourceReference_To_operators_ConfigMapResourceReference(in *ConfigMapResourceReference, out *operators.ConfigMapResourceReference, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Namespace = in.Namespace
-	out.UID = types.UID(in.UID)
-	out.ResourceVersion = in.ResourceVersion
-	out.LastUpdateTime = in.LastUpdateTime
-	return nil
-}
-
-// Convert_v1alpha1_ConfigMapResourceReference_To_operators_ConfigMapResourceReference is an autogenerated conversion function.
-func Convert_v1alpha1_ConfigMapResourceReference_To_operators_ConfigMapResourceReference(in *ConfigMapResourceReference, out *operators.ConfigMapResourceReference, s conversion.Scope) error {
-	return autoConvert_v1alpha1_ConfigMapResourceReference_To_operators_ConfigMapResourceReference(in, out, s)
-}
-
-func autoConvert_operators_ConfigMapResourceReference_To_v1alpha1_ConfigMapResourceReference(in *operators.ConfigMapResourceReference, out *ConfigMapResourceReference, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Namespace = in.Namespace
-	out.UID = types.UID(in.UID)
-	out.ResourceVersion = in.ResourceVersion
-	out.LastUpdateTime = in.LastUpdateTime
-	return nil
-}
-
-// Convert_operators_ConfigMapResourceReference_To_v1alpha1_ConfigMapResourceReference is an autogenerated conversion function.
-func Convert_operators_ConfigMapResourceReference_To_v1alpha1_ConfigMapResourceReference(in *operators.ConfigMapResourceReference, out *ConfigMapResourceReference, s conversion.Scope) error {
-	return autoConvert_operators_ConfigMapResourceReference_To_v1alpha1_ConfigMapResourceReference(in, out, s)
-}
-
-func autoConvert_v1alpha1_CustomResourceDefinitions_To_operators_CustomResourceDefinitions(in *CustomResourceDefinitions, out *operators.CustomResourceDefinitions, s conversion.Scope) error {
-	out.Owned = *(*[]operators.CRDDescription)(unsafe.Pointer(&in.Owned))
-	out.Required = *(*[]operators.CRDDescription)(unsafe.Pointer(&in.Required))
-	return nil
-}
-
-// Convert_v1alpha1_CustomResourceDefinitions_To_operators_CustomResourceDefinitions is an autogenerated conversion function.
-func Convert_v1alpha1_CustomResourceDefinitions_To_operators_CustomResourceDefinitions(in *CustomResourceDefinitions, out *operators.CustomResourceDefinitions, s conversion.Scope) error {
-	return autoConvert_v1alpha1_CustomResourceDefinitions_To_operators_CustomResourceDefinitions(in, out, s)
-}
-
-func autoConvert_operators_CustomResourceDefinitions_To_v1alpha1_CustomResourceDefinitions(in *operators.CustomResourceDefinitions, out *CustomResourceDefinitions, s conversion.Scope) error {
-	out.Owned = *(*[]CRDDescription)(unsafe.Pointer(&in.Owned))
-	out.Required = *(*[]CRDDescription)(unsafe.Pointer(&in.Required))
-	return nil
-}
-
-// Convert_operators_CustomResourceDefinitions_To_v1alpha1_CustomResourceDefinitions is an autogenerated conversion function.
-func Convert_operators_CustomResourceDefinitions_To_v1alpha1_CustomResourceDefinitions(in *operators.CustomResourceDefinitions, out *CustomResourceDefinitions, s conversion.Scope) error {
-	return autoConvert_operators_CustomResourceDefinitions_To_v1alpha1_CustomResourceDefinitions(in, out, s)
-}
-
-func autoConvert_v1alpha1_DependentStatus_To_operators_DependentStatus(in *DependentStatus, out *operators.DependentStatus, s conversion.Scope) error {
-	out.Group = in.Group
-	out.Version = in.Version
-	out.Kind = in.Kind
-	out.Status = operators.StatusReason(in.Status)
-	out.UUID = in.UUID
-	out.Message = in.Message
-	return nil
-}
-
-// Convert_v1alpha1_DependentStatus_To_operators_DependentStatus is an autogenerated conversion function.
-func Convert_v1alpha1_DependentStatus_To_operators_DependentStatus(in *DependentStatus, out *operators.DependentStatus, s conversion.Scope) error {
-	return autoConvert_v1alpha1_DependentStatus_To_operators_DependentStatus(in, out, s)
-}
-
-func autoConvert_operators_DependentStatus_To_v1alpha1_DependentStatus(in *operators.DependentStatus, out *DependentStatus, s conversion.Scope) error {
-	out.Group = in.Group
-	out.Version = in.Version
-	out.Kind = in.Kind
-	out.Status = StatusReason(in.Status)
-	out.UUID = in.UUID
-	out.Message = in.Message
-	return nil
-}
-
-// Convert_operators_DependentStatus_To_v1alpha1_DependentStatus is an autogenerated conversion function.
-func Convert_operators_DependentStatus_To_v1alpha1_DependentStatus(in *operators.DependentStatus, out *DependentStatus, s conversion.Scope) error {
-	return autoConvert_operators_DependentStatus_To_v1alpha1_DependentStatus(in, out, s)
-}
-
-func autoConvert_v1alpha1_GRPCConnectionState_To_operators_GRPCConnectionState(in *GRPCConnectionState, out *operators.GRPCConnectionState, s conversion.Scope) error {
-	out.Address = in.Address
-	out.LastObservedState = in.LastObservedState
-	out.LastConnectTime = in.LastConnectTime
-	return nil
-}
-
-// Convert_v1alpha1_GRPCConnectionState_To_operators_GRPCConnectionState is an autogenerated conversion function.
-func Convert_v1alpha1_GRPCConnectionState_To_operators_GRPCConnectionState(in *GRPCConnectionState, out *operators.GRPCConnectionState, s conversion.Scope) error {
-	return autoConvert_v1alpha1_GRPCConnectionState_To_operators_GRPCConnectionState(in, out, s)
-}
-
-func autoConvert_operators_GRPCConnectionState_To_v1alpha1_GRPCConnectionState(in *operators.GRPCConnectionState, out *GRPCConnectionState, s conversion.Scope) error {
-	out.Address = in.Address
-	out.LastObservedState = in.LastObservedState
-	out.LastConnectTime = in.LastConnectTime
-	return nil
-}
-
-// Convert_operators_GRPCConnectionState_To_v1alpha1_GRPCConnectionState is an autogenerated conversion function.
-func Convert_operators_GRPCConnectionState_To_v1alpha1_GRPCConnectionState(in *operators.GRPCConnectionState, out *GRPCConnectionState, s conversion.Scope) error {
-	return autoConvert_operators_GRPCConnectionState_To_v1alpha1_GRPCConnectionState(in, out, s)
-}
-
-func autoConvert_v1alpha1_Icon_To_operators_Icon(in *Icon, out *operators.Icon, s conversion.Scope) error {
-	out.Data = in.Data
-	out.MediaType = in.MediaType
-	return nil
-}
-
-// Convert_v1alpha1_Icon_To_operators_Icon is an autogenerated conversion function.
-func Convert_v1alpha1_Icon_To_operators_Icon(in *Icon, out *operators.Icon, s conversion.Scope) error {
-	return autoConvert_v1alpha1_Icon_To_operators_Icon(in, out, s)
-}
-
-func autoConvert_operators_Icon_To_v1alpha1_Icon(in *operators.Icon, out *Icon, s conversion.Scope) error {
-	out.Data = in.Data
-	out.MediaType = in.MediaType
-	return nil
-}
-
-// Convert_operators_Icon_To_v1alpha1_Icon is an autogenerated conversion function.
-func Convert_operators_Icon_To_v1alpha1_Icon(in *operators.Icon, out *Icon, s conversion.Scope) error {
-	return autoConvert_operators_Icon_To_v1alpha1_Icon(in, out, s)
-}
-
-func autoConvert_v1alpha1_InstallMode_To_operators_InstallMode(in *InstallMode, out *operators.InstallMode, s conversion.Scope) error {
-	out.Type = operators.InstallModeType(in.Type)
-	out.Supported = in.Supported
-	return nil
-}
-
-// Convert_v1alpha1_InstallMode_To_operators_InstallMode is an autogenerated conversion function.
-func Convert_v1alpha1_InstallMode_To_operators_InstallMode(in *InstallMode, out *operators.InstallMode, s conversion.Scope) error {
-	return autoConvert_v1alpha1_InstallMode_To_operators_InstallMode(in, out, s)
-}
-
-func autoConvert_operators_InstallMode_To_v1alpha1_InstallMode(in *operators.InstallMode, out *InstallMode, s conversion.Scope) error {
-	out.Type = InstallModeType(in.Type)
-	out.Supported = in.Supported
-	return nil
-}
-
-// Convert_operators_InstallMode_To_v1alpha1_InstallMode is an autogenerated conversion function.
-func Convert_operators_InstallMode_To_v1alpha1_InstallMode(in *operators.InstallMode, out *InstallMode, s conversion.Scope) error {
-	return autoConvert_operators_InstallMode_To_v1alpha1_InstallMode(in, out, s)
-}
-
-func autoConvert_v1alpha1_InstallPlan_To_operators_InstallPlan(in *InstallPlan, out *operators.InstallPlan, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	if err := Convert_v1alpha1_InstallPlanSpec_To_operators_InstallPlanSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_v1alpha1_InstallPlanStatus_To_operators_InstallPlanStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1alpha1_InstallPlan_To_operators_InstallPlan is an autogenerated conversion function.
-func Convert_v1alpha1_InstallPlan_To_operators_InstallPlan(in *InstallPlan, out *operators.InstallPlan, s conversion.Scope) error {
-	return autoConvert_v1alpha1_InstallPlan_To_operators_InstallPlan(in, out, s)
-}
-
-func autoConvert_operators_InstallPlan_To_v1alpha1_InstallPlan(in *operators.InstallPlan, out *InstallPlan, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	if err := Convert_operators_InstallPlanSpec_To_v1alpha1_InstallPlanSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_operators_InstallPlanStatus_To_v1alpha1_InstallPlanStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_operators_InstallPlan_To_v1alpha1_InstallPlan is an autogenerated conversion function.
-func Convert_operators_InstallPlan_To_v1alpha1_InstallPlan(in *operators.InstallPlan, out *InstallPlan, s conversion.Scope) error {
-	return autoConvert_operators_InstallPlan_To_v1alpha1_InstallPlan(in, out, s)
-}
-
-func autoConvert_v1alpha1_InstallPlanCondition_To_operators_InstallPlanCondition(in *InstallPlanCondition, out *operators.InstallPlanCondition, s conversion.Scope) error {
-	out.Type = operators.InstallPlanConditionType(in.Type)
-	out.Status = v1.ConditionStatus(in.Status)
-	out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
-	out.LastTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastTransitionTime))
-	out.Reason = operators.InstallPlanConditionReason(in.Reason)
-	out.Message = in.Message
-	return nil
-}
-
-// Convert_v1alpha1_InstallPlanCondition_To_operators_InstallPlanCondition is an autogenerated conversion function.
-func Convert_v1alpha1_InstallPlanCondition_To_operators_InstallPlanCondition(in *InstallPlanCondition, out *operators.InstallPlanCondition, s conversion.Scope) error {
-	return autoConvert_v1alpha1_InstallPlanCondition_To_operators_InstallPlanCondition(in, out, s)
-}
-
-func autoConvert_operators_InstallPlanCondition_To_v1alpha1_InstallPlanCondition(in *operators.InstallPlanCondition, out *InstallPlanCondition, s conversion.Scope) error {
-	out.Type = InstallPlanConditionType(in.Type)
-	out.Status = v1.ConditionStatus(in.Status)
-	out.LastUpdateTime = (*metav1.Time)(unsafe.Pointer(in.LastUpdateTime))
-	out.LastTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastTransitionTime))
-	out.Reason = InstallPlanConditionReason(in.Reason)
-	out.Message = in.Message
-	return nil
-}
-
-// Convert_operators_InstallPlanCondition_To_v1alpha1_InstallPlanCondition is an autogenerated conversion function.
-func Convert_operators_InstallPlanCondition_To_v1alpha1_InstallPlanCondition(in *operators.InstallPlanCondition, out *InstallPlanCondition, s conversion.Scope) error {
-	return autoConvert_operators_InstallPlanCondition_To_v1alpha1_InstallPlanCondition(in, out, s)
-}
-
-func autoConvert_v1alpha1_InstallPlanList_To_operators_InstallPlanList(in *InstallPlanList, out *operators.InstallPlanList, s conversion.Scope) error {
-	out.ListMeta = in.ListMeta
-	out.Items = *(*[]operators.InstallPlan)(unsafe.Pointer(&in.Items))
-	return nil
-}
-
-// Convert_v1alpha1_InstallPlanList_To_operators_InstallPlanList is an autogenerated conversion function.
-func Convert_v1alpha1_InstallPlanList_To_operators_InstallPlanList(in *InstallPlanList, out *operators.InstallPlanList, s conversion.Scope) error {
-	return autoConvert_v1alpha1_InstallPlanList_To_operators_InstallPlanList(in, out, s)
-}
-
-func autoConvert_operators_InstallPlanList_To_v1alpha1_InstallPlanList(in *operators.InstallPlanList, out *InstallPlanList, s conversion.Scope) error {
-	out.ListMeta = in.ListMeta
-	out.Items = *(*[]InstallPlan)(unsafe.Pointer(&in.Items))
-	return nil
-}
-
-// Convert_operators_InstallPlanList_To_v1alpha1_InstallPlanList is an autogenerated conversion function.
-func Convert_operators_InstallPlanList_To_v1alpha1_InstallPlanList(in *operators.InstallPlanList, out *InstallPlanList, s conversion.Scope) error {
-	return autoConvert_operators_InstallPlanList_To_v1alpha1_InstallPlanList(in, out, s)
-}
-
-func autoConvert_v1alpha1_InstallPlanReference_To_operators_InstallPlanReference(in *InstallPlanReference, out *operators.InstallPlanReference, s conversion.Scope) error {
-	out.APIVersion = in.APIVersion
-	out.Kind = in.Kind
-	out.Name = in.Name
-	out.UID = types.UID(in.UID)
-	return nil
-}
-
-// Convert_v1alpha1_InstallPlanReference_To_operators_InstallPlanReference is an autogenerated conversion function.
-func Convert_v1alpha1_InstallPlanReference_To_operators_InstallPlanReference(in *InstallPlanReference, out *operators.InstallPlanReference, s conversion.Scope) error {
-	return autoConvert_v1alpha1_InstallPlanReference_To_operators_InstallPlanReference(in, out, s)
-}
-
-func autoConvert_operators_InstallPlanReference_To_v1alpha1_InstallPlanReference(in *operators.InstallPlanReference, out *InstallPlanReference, s conversion.Scope) error {
-	out.APIVersion = in.APIVersion
-	out.Kind = in.Kind
-	out.Name = in.Name
-	out.UID = types.UID(in.UID)
-	return nil
-}
-
-// Convert_operators_InstallPlanReference_To_v1alpha1_InstallPlanReference is an autogenerated conversion function.
-func Convert_operators_InstallPlanReference_To_v1alpha1_InstallPlanReference(in *operators.InstallPlanReference, out *InstallPlanReference, s conversion.Scope) error {
-	return autoConvert_operators_InstallPlanReference_To_v1alpha1_InstallPlanReference(in, out, s)
-}
-
-func autoConvert_v1alpha1_InstallPlanSpec_To_operators_InstallPlanSpec(in *InstallPlanSpec, out *operators.InstallPlanSpec, s conversion.Scope) error {
-	out.CatalogSource = in.CatalogSource
-	out.CatalogSourceNamespace = in.CatalogSourceNamespace
-	out.ClusterServiceVersionNames = *(*[]string)(unsafe.Pointer(&in.ClusterServiceVersionNames))
-	out.Approval = operators.Approval(in.Approval)
-	out.Approved = in.Approved
-	return nil
-}
-
-// Convert_v1alpha1_InstallPlanSpec_To_operators_InstallPlanSpec is an autogenerated conversion function.
-func Convert_v1alpha1_InstallPlanSpec_To_operators_InstallPlanSpec(in *InstallPlanSpec, out *operators.InstallPlanSpec, s conversion.Scope) error {
-	return autoConvert_v1alpha1_InstallPlanSpec_To_operators_InstallPlanSpec(in, out, s)
-}
-
-func autoConvert_operators_InstallPlanSpec_To_v1alpha1_InstallPlanSpec(in *operators.InstallPlanSpec, out *InstallPlanSpec, s conversion.Scope) error {
-	out.CatalogSource = in.CatalogSource
-	out.CatalogSourceNamespace = in.CatalogSourceNamespace
-	out.ClusterServiceVersionNames = *(*[]string)(unsafe.Pointer(&in.ClusterServiceVersionNames))
-	out.Approval = Approval(in.Approval)
-	out.Approved = in.Approved
-	return nil
-}
-
-// Convert_operators_InstallPlanSpec_To_v1alpha1_InstallPlanSpec is an autogenerated conversion function.
-func Convert_operators_InstallPlanSpec_To_v1alpha1_InstallPlanSpec(in *operators.InstallPlanSpec, out *InstallPlanSpec, s conversion.Scope) error {
-	return autoConvert_operators_InstallPlanSpec_To_v1alpha1_InstallPlanSpec(in, out, s)
-}
-
-func autoConvert_v1alpha1_InstallPlanStatus_To_operators_InstallPlanStatus(in *InstallPlanStatus, out *operators.InstallPlanStatus, s conversion.Scope) error {
-	out.Phase = operators.InstallPlanPhase(in.Phase)
-	out.Conditions = *(*[]operators.InstallPlanCondition)(unsafe.Pointer(&in.Conditions))
-	out.CatalogSources = *(*[]string)(unsafe.Pointer(&in.CatalogSources))
-	out.Plan = *(*[]*operators.Step)(unsafe.Pointer(&in.Plan))
-	out.BundleLookups = *(*[]operators.BundleLookup)(unsafe.Pointer(&in.BundleLookups))
-	out.AttenuatedServiceAccountRef = (*v1.ObjectReference)(unsafe.Pointer(in.AttenuatedServiceAccountRef))
-	return nil
-}
-
-// Convert_v1alpha1_InstallPlanStatus_To_operators_InstallPlanStatus is an autogenerated conversion function.
-func Convert_v1alpha1_InstallPlanStatus_To_operators_InstallPlanStatus(in *InstallPlanStatus, out *operators.InstallPlanStatus, s conversion.Scope) error {
-	return autoConvert_v1alpha1_InstallPlanStatus_To_operators_InstallPlanStatus(in, out, s)
-}
-
-func autoConvert_operators_InstallPlanStatus_To_v1alpha1_InstallPlanStatus(in *operators.InstallPlanStatus, out *InstallPlanStatus, s conversion.Scope) error {
-	out.Phase = InstallPlanPhase(in.Phase)
-	out.Conditions = *(*[]InstallPlanCondition)(unsafe.Pointer(&in.Conditions))
-	out.CatalogSources = *(*[]string)(unsafe.Pointer(&in.CatalogSources))
-	out.Plan = *(*[]*Step)(unsafe.Pointer(&in.Plan))
-	out.BundleLookups = *(*[]BundleLookup)(unsafe.Pointer(&in.BundleLookups))
-	out.AttenuatedServiceAccountRef = (*v1.ObjectReference)(unsafe.Pointer(in.AttenuatedServiceAccountRef))
-	return nil
-}
-
-// Convert_operators_InstallPlanStatus_To_v1alpha1_InstallPlanStatus is an autogenerated conversion function.
-func Convert_operators_InstallPlanStatus_To_v1alpha1_InstallPlanStatus(in *operators.InstallPlanStatus, out *InstallPlanStatus, s conversion.Scope) error {
-	return autoConvert_operators_InstallPlanStatus_To_v1alpha1_InstallPlanStatus(in, out, s)
-}
-
-func autoConvert_v1alpha1_Maintainer_To_operators_Maintainer(in *Maintainer, out *operators.Maintainer, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Email = in.Email
-	return nil
-}
-
-// Convert_v1alpha1_Maintainer_To_operators_Maintainer is an autogenerated conversion function.
-func Convert_v1alpha1_Maintainer_To_operators_Maintainer(in *Maintainer, out *operators.Maintainer, s conversion.Scope) error {
-	return autoConvert_v1alpha1_Maintainer_To_operators_Maintainer(in, out, s)
-}
-
-func autoConvert_operators_Maintainer_To_v1alpha1_Maintainer(in *operators.Maintainer, out *Maintainer, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Email = in.Email
-	return nil
-}
-
-// Convert_operators_Maintainer_To_v1alpha1_Maintainer is an autogenerated conversion function.
-func Convert_operators_Maintainer_To_v1alpha1_Maintainer(in *operators.Maintainer, out *Maintainer, s conversion.Scope) error {
-	return autoConvert_operators_Maintainer_To_v1alpha1_Maintainer(in, out, s)
-}
-
-func autoConvert_v1alpha1_NamedInstallStrategy_To_operators_NamedInstallStrategy(in *NamedInstallStrategy, out *operators.NamedInstallStrategy, s conversion.Scope) error {
-	out.StrategyName = in.StrategyName
-	if err := Convert_v1alpha1_StrategyDetailsDeployment_To_operators_StrategyDetailsDeployment(&in.StrategySpec, &out.StrategySpec, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1alpha1_NamedInstallStrategy_To_operators_NamedInstallStrategy is an autogenerated conversion function.
-func Convert_v1alpha1_NamedInstallStrategy_To_operators_NamedInstallStrategy(in *NamedInstallStrategy, out *operators.NamedInstallStrategy, s conversion.Scope) error {
-	return autoConvert_v1alpha1_NamedInstallStrategy_To_operators_NamedInstallStrategy(in, out, s)
-}
-
-func autoConvert_operators_NamedInstallStrategy_To_v1alpha1_NamedInstallStrategy(in *operators.NamedInstallStrategy, out *NamedInstallStrategy, s conversion.Scope) error {
-	out.StrategyName = in.StrategyName
-	if err := Convert_operators_StrategyDetailsDeployment_To_v1alpha1_StrategyDetailsDeployment(&in.StrategySpec, &out.StrategySpec, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_operators_NamedInstallStrategy_To_v1alpha1_NamedInstallStrategy is an autogenerated conversion function.
-func Convert_operators_NamedInstallStrategy_To_v1alpha1_NamedInstallStrategy(in *operators.NamedInstallStrategy, out *NamedInstallStrategy, s conversion.Scope) error {
-	return autoConvert_operators_NamedInstallStrategy_To_v1alpha1_NamedInstallStrategy(in, out, s)
-}
-
-func autoConvert_v1alpha1_RegistryPoll_To_operators_RegistryPoll(in *RegistryPoll, out *operators.RegistryPoll, s conversion.Scope) error {
-	out.Interval = (*metav1.Duration)(unsafe.Pointer(in.Interval))
-	return nil
-}
-
-// Convert_v1alpha1_RegistryPoll_To_operators_RegistryPoll is an autogenerated conversion function.
-func Convert_v1alpha1_RegistryPoll_To_operators_RegistryPoll(in *RegistryPoll, out *operators.RegistryPoll, s conversion.Scope) error {
-	return autoConvert_v1alpha1_RegistryPoll_To_operators_RegistryPoll(in, out, s)
-}
-
-func autoConvert_operators_RegistryPoll_To_v1alpha1_RegistryPoll(in *operators.RegistryPoll, out *RegistryPoll, s conversion.Scope) error {
-	out.Interval = (*metav1.Duration)(unsafe.Pointer(in.Interval))
-	return nil
-}
-
-// Convert_operators_RegistryPoll_To_v1alpha1_RegistryPoll is an autogenerated conversion function.
-func Convert_operators_RegistryPoll_To_v1alpha1_RegistryPoll(in *operators.RegistryPoll, out *RegistryPoll, s conversion.Scope) error {
-	return autoConvert_operators_RegistryPoll_To_v1alpha1_RegistryPoll(in, out, s)
-}
-
-func autoConvert_v1alpha1_RegistryServiceStatus_To_operators_RegistryServiceStatus(in *RegistryServiceStatus, out *operators.RegistryServiceStatus, s conversion.Scope) error {
-	out.Protocol = in.Protocol
-	out.ServiceName = in.ServiceName
-	out.ServiceNamespace = in.ServiceNamespace
-	out.Port = in.Port
-	out.CreatedAt = in.CreatedAt
-	return nil
-}
-
-// Convert_v1alpha1_RegistryServiceStatus_To_operators_RegistryServiceStatus is an autogenerated conversion function.
-func Convert_v1alpha1_RegistryServiceStatus_To_operators_RegistryServiceStatus(in *RegistryServiceStatus, out *operators.RegistryServiceStatus, s conversion.Scope) error {
-	return autoConvert_v1alpha1_RegistryServiceStatus_To_operators_RegistryServiceStatus(in, out, s)
-}
-
-func autoConvert_operators_RegistryServiceStatus_To_v1alpha1_RegistryServiceStatus(in *operators.RegistryServiceStatus, out *RegistryServiceStatus, s conversion.Scope) error {
-	out.Protocol = in.Protocol
-	out.ServiceName = in.ServiceName
-	out.ServiceNamespace = in.ServiceNamespace
-	out.Port = in.Port
-	out.CreatedAt = in.CreatedAt
-	return nil
-}
-
-// Convert_operators_RegistryServiceStatus_To_v1alpha1_RegistryServiceStatus is an autogenerated conversion function.
-func Convert_operators_RegistryServiceStatus_To_v1alpha1_RegistryServiceStatus(in *operators.RegistryServiceStatus, out *RegistryServiceStatus, s conversion.Scope) error {
-	return autoConvert_operators_RegistryServiceStatus_To_v1alpha1_RegistryServiceStatus(in, out, s)
-}
-
-func autoConvert_v1alpha1_RequirementStatus_To_operators_RequirementStatus(in *RequirementStatus, out *operators.RequirementStatus, s conversion.Scope) error {
-	out.Group = in.Group
-	out.Version = in.Version
-	out.Kind = in.Kind
-	out.Name = in.Name
-	out.Status = operators.StatusReason(in.Status)
-	out.Message = in.Message
-	out.UUID = in.UUID
-	out.Dependents = *(*[]operators.DependentStatus)(unsafe.Pointer(&in.Dependents))
-	return nil
-}
-
-// Convert_v1alpha1_RequirementStatus_To_operators_RequirementStatus is an autogenerated conversion function.
-func Convert_v1alpha1_RequirementStatus_To_operators_RequirementStatus(in *RequirementStatus, out *operators.RequirementStatus, s conversion.Scope) error {
-	return autoConvert_v1alpha1_RequirementStatus_To_operators_RequirementStatus(in, out, s)
-}
-
-func autoConvert_operators_RequirementStatus_To_v1alpha1_RequirementStatus(in *operators.RequirementStatus, out *RequirementStatus, s conversion.Scope) error {
-	out.Group = in.Group
-	out.Version = in.Version
-	out.Kind = in.Kind
-	out.Name = in.Name
-	out.Status = StatusReason(in.Status)
-	out.Message = in.Message
-	out.UUID = in.UUID
-	out.Dependents = *(*[]DependentStatus)(unsafe.Pointer(&in.Dependents))
-	return nil
-}
-
-// Convert_operators_RequirementStatus_To_v1alpha1_RequirementStatus is an autogenerated conversion function.
-func Convert_operators_RequirementStatus_To_v1alpha1_RequirementStatus(in *operators.RequirementStatus, out *RequirementStatus, s conversion.Scope) error {
-	return autoConvert_operators_RequirementStatus_To_v1alpha1_RequirementStatus(in, out, s)
-}
-
-func autoConvert_v1alpha1_SpecDescriptor_To_operators_SpecDescriptor(in *SpecDescriptor, out *operators.SpecDescriptor, s conversion.Scope) error {
-	out.Path = in.Path
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.XDescriptors = *(*[]string)(unsafe.Pointer(&in.XDescriptors))
-	out.Value = (*json.RawMessage)(unsafe.Pointer(in.Value))
-	return nil
-}
-
-// Convert_v1alpha1_SpecDescriptor_To_operators_SpecDescriptor is an autogenerated conversion function.
-func Convert_v1alpha1_SpecDescriptor_To_operators_SpecDescriptor(in *SpecDescriptor, out *operators.SpecDescriptor, s conversion.Scope) error {
-	return autoConvert_v1alpha1_SpecDescriptor_To_operators_SpecDescriptor(in, out, s)
-}
-
-func autoConvert_operators_SpecDescriptor_To_v1alpha1_SpecDescriptor(in *operators.SpecDescriptor, out *SpecDescriptor, s conversion.Scope) error {
-	out.Path = in.Path
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.XDescriptors = *(*[]string)(unsafe.Pointer(&in.XDescriptors))
-	out.Value = (*json.RawMessage)(unsafe.Pointer(in.Value))
-	return nil
-}
-
-// Convert_operators_SpecDescriptor_To_v1alpha1_SpecDescriptor is an autogenerated conversion function.
-func Convert_operators_SpecDescriptor_To_v1alpha1_SpecDescriptor(in *operators.SpecDescriptor, out *SpecDescriptor, s conversion.Scope) error {
-	return autoConvert_operators_SpecDescriptor_To_v1alpha1_SpecDescriptor(in, out, s)
-}
-
-func autoConvert_v1alpha1_StatusDescriptor_To_operators_StatusDescriptor(in *StatusDescriptor, out *operators.StatusDescriptor, s conversion.Scope) error {
-	out.Path = in.Path
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.XDescriptors = *(*[]string)(unsafe.Pointer(&in.XDescriptors))
-	out.Value = (*json.RawMessage)(unsafe.Pointer(in.Value))
-	return nil
-}
-
-// Convert_v1alpha1_StatusDescriptor_To_operators_StatusDescriptor is an autogenerated conversion function.
-func Convert_v1alpha1_StatusDescriptor_To_operators_StatusDescriptor(in *StatusDescriptor, out *operators.StatusDescriptor, s conversion.Scope) error {
-	return autoConvert_v1alpha1_StatusDescriptor_To_operators_StatusDescriptor(in, out, s)
-}
-
-func autoConvert_operators_StatusDescriptor_To_v1alpha1_StatusDescriptor(in *operators.StatusDescriptor, out *StatusDescriptor, s conversion.Scope) error {
-	out.Path = in.Path
-	out.DisplayName = in.DisplayName
-	out.Description = in.Description
-	out.XDescriptors = *(*[]string)(unsafe.Pointer(&in.XDescriptors))
-	out.Value = (*json.RawMessage)(unsafe.Pointer(in.Value))
-	return nil
-}
-
-// Convert_operators_StatusDescriptor_To_v1alpha1_StatusDescriptor is an autogenerated conversion function.
-func Convert_operators_StatusDescriptor_To_v1alpha1_StatusDescriptor(in *operators.StatusDescriptor, out *StatusDescriptor, s conversion.Scope) error {
-	return autoConvert_operators_StatusDescriptor_To_v1alpha1_StatusDescriptor(in, out, s)
-}
-
-func autoConvert_v1alpha1_Step_To_operators_Step(in *Step, out *operators.Step, s conversion.Scope) error {
-	out.Resolving = in.Resolving
-	if err := Convert_v1alpha1_StepResource_To_operators_StepResource(&in.Resource, &out.Resource, s); err != nil {
-		return err
-	}
-	out.Status = operators.StepStatus(in.Status)
-	return nil
-}
-
-// Convert_v1alpha1_Step_To_operators_Step is an autogenerated conversion function.
-func Convert_v1alpha1_Step_To_operators_Step(in *Step, out *operators.Step, s conversion.Scope) error {
-	return autoConvert_v1alpha1_Step_To_operators_Step(in, out, s)
-}
-
-func autoConvert_operators_Step_To_v1alpha1_Step(in *operators.Step, out *Step, s conversion.Scope) error {
-	out.Resolving = in.Resolving
-	if err := Convert_operators_StepResource_To_v1alpha1_StepResource(&in.Resource, &out.Resource, s); err != nil {
-		return err
-	}
-	out.Status = StepStatus(in.Status)
-	return nil
-}
-
-// Convert_operators_Step_To_v1alpha1_Step is an autogenerated conversion function.
-func Convert_operators_Step_To_v1alpha1_Step(in *operators.Step, out *Step, s conversion.Scope) error {
-	return autoConvert_operators_Step_To_v1alpha1_Step(in, out, s)
-}
-
-func autoConvert_v1alpha1_StepResource_To_operators_StepResource(in *StepResource, out *operators.StepResource, s conversion.Scope) error {
-	out.CatalogSource = in.CatalogSource
-	out.CatalogSourceNamespace = in.CatalogSourceNamespace
-	out.Group = in.Group
-	out.Version = in.Version
-	out.Kind = in.Kind
-	out.Name = in.Name
-	out.Manifest = in.Manifest
-	return nil
-}
-
-// Convert_v1alpha1_StepResource_To_operators_StepResource is an autogenerated conversion function.
-func Convert_v1alpha1_StepResource_To_operators_StepResource(in *StepResource, out *operators.StepResource, s conversion.Scope) error {
-	return autoConvert_v1alpha1_StepResource_To_operators_StepResource(in, out, s)
-}
-
-func autoConvert_operators_StepResource_To_v1alpha1_StepResource(in *operators.StepResource, out *StepResource, s conversion.Scope) error {
-	out.CatalogSource = in.CatalogSource
-	out.CatalogSourceNamespace = in.CatalogSourceNamespace
-	out.Group = in.Group
-	out.Version = in.Version
-	out.Kind = in.Kind
-	out.Name = in.Name
-	out.Manifest = in.Manifest
-	return nil
-}
-
-// Convert_operators_StepResource_To_v1alpha1_StepResource is an autogenerated conversion function.
-func Convert_operators_StepResource_To_v1alpha1_StepResource(in *operators.StepResource, out *StepResource, s conversion.Scope) error {
-	return autoConvert_operators_StepResource_To_v1alpha1_StepResource(in, out, s)
-}
-
-func autoConvert_v1alpha1_StrategyDeploymentPermissions_To_operators_StrategyDeploymentPermissions(in *StrategyDeploymentPermissions, out *operators.StrategyDeploymentPermissions, s conversion.Scope) error {
-	out.ServiceAccountName = in.ServiceAccountName
-	out.Rules = *(*[]rbacv1.PolicyRule)(unsafe.Pointer(&in.Rules))
-	return nil
-}
-
-// Convert_v1alpha1_StrategyDeploymentPermissions_To_operators_StrategyDeploymentPermissions is an autogenerated conversion function.
-func Convert_v1alpha1_StrategyDeploymentPermissions_To_operators_StrategyDeploymentPermissions(in *StrategyDeploymentPermissions, out *operators.StrategyDeploymentPermissions, s conversion.Scope) error {
-	return autoConvert_v1alpha1_StrategyDeploymentPermissions_To_operators_StrategyDeploymentPermissions(in, out, s)
-}
-
-func autoConvert_operators_StrategyDeploymentPermissions_To_v1alpha1_StrategyDeploymentPermissions(in *operators.StrategyDeploymentPermissions, out *StrategyDeploymentPermissions, s conversion.Scope) error {
-	out.ServiceAccountName = in.ServiceAccountName
-	out.Rules = *(*[]rbacv1.PolicyRule)(unsafe.Pointer(&in.Rules))
-	return nil
-}
-
-// Convert_operators_StrategyDeploymentPermissions_To_v1alpha1_StrategyDeploymentPermissions is an autogenerated conversion function.
-func Convert_operators_StrategyDeploymentPermissions_To_v1alpha1_StrategyDeploymentPermissions(in *operators.StrategyDeploymentPermissions, out *StrategyDeploymentPermissions, s conversion.Scope) error {
-	return autoConvert_operators_StrategyDeploymentPermissions_To_v1alpha1_StrategyDeploymentPermissions(in, out, s)
-}
-
-func autoConvert_v1alpha1_StrategyDeploymentSpec_To_operators_StrategyDeploymentSpec(in *StrategyDeploymentSpec, out *operators.StrategyDeploymentSpec, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Spec = in.Spec
-	return nil
-}
-
-// Convert_v1alpha1_StrategyDeploymentSpec_To_operators_StrategyDeploymentSpec is an autogenerated conversion function.
-func Convert_v1alpha1_StrategyDeploymentSpec_To_operators_StrategyDeploymentSpec(in *StrategyDeploymentSpec, out *operators.StrategyDeploymentSpec, s conversion.Scope) error {
-	return autoConvert_v1alpha1_StrategyDeploymentSpec_To_operators_StrategyDeploymentSpec(in, out, s)
-}
-
-func autoConvert_operators_StrategyDeploymentSpec_To_v1alpha1_StrategyDeploymentSpec(in *operators.StrategyDeploymentSpec, out *StrategyDeploymentSpec, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Spec = in.Spec
-	return nil
-}
-
-// Convert_operators_StrategyDeploymentSpec_To_v1alpha1_StrategyDeploymentSpec is an autogenerated conversion function.
-func Convert_operators_StrategyDeploymentSpec_To_v1alpha1_StrategyDeploymentSpec(in *operators.StrategyDeploymentSpec, out *StrategyDeploymentSpec, s conversion.Scope) error {
-	return autoConvert_operators_StrategyDeploymentSpec_To_v1alpha1_StrategyDeploymentSpec(in, out, s)
-}
-
-func autoConvert_v1alpha1_StrategyDetailsDeployment_To_operators_StrategyDetailsDeployment(in *StrategyDetailsDeployment, out *operators.StrategyDetailsDeployment, s conversion.Scope) error {
-	out.DeploymentSpecs = *(*[]operators.StrategyDeploymentSpec)(unsafe.Pointer(&in.DeploymentSpecs))
-	out.Permissions = *(*[]operators.StrategyDeploymentPermissions)(unsafe.Pointer(&in.Permissions))
-	out.ClusterPermissions = *(*[]operators.StrategyDeploymentPermissions)(unsafe.Pointer(&in.ClusterPermissions))
-	return nil
-}
-
-// Convert_v1alpha1_StrategyDetailsDeployment_To_operators_StrategyDetailsDeployment is an autogenerated conversion function.
-func Convert_v1alpha1_StrategyDetailsDeployment_To_operators_StrategyDetailsDeployment(in *StrategyDetailsDeployment, out *operators.StrategyDetailsDeployment, s conversion.Scope) error {
-	return autoConvert_v1alpha1_StrategyDetailsDeployment_To_operators_StrategyDetailsDeployment(in, out, s)
-}
-
-func autoConvert_operators_StrategyDetailsDeployment_To_v1alpha1_StrategyDetailsDeployment(in *operators.StrategyDetailsDeployment, out *StrategyDetailsDeployment, s conversion.Scope) error {
-	out.DeploymentSpecs = *(*[]StrategyDeploymentSpec)(unsafe.Pointer(&in.DeploymentSpecs))
-	out.Permissions = *(*[]StrategyDeploymentPermissions)(unsafe.Pointer(&in.Permissions))
-	out.ClusterPermissions = *(*[]StrategyDeploymentPermissions)(unsafe.Pointer(&in.ClusterPermissions))
-	return nil
-}
-
-// Convert_operators_StrategyDetailsDeployment_To_v1alpha1_StrategyDetailsDeployment is an autogenerated conversion function.
-func Convert_operators_StrategyDetailsDeployment_To_v1alpha1_StrategyDetailsDeployment(in *operators.StrategyDetailsDeployment, out *StrategyDetailsDeployment, s conversion.Scope) error {
-	return autoConvert_operators_StrategyDetailsDeployment_To_v1alpha1_StrategyDetailsDeployment(in, out, s)
-}
-
-func autoConvert_v1alpha1_Subscription_To_operators_Subscription(in *Subscription, out *operators.Subscription, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	out.Spec = (*operators.SubscriptionSpec)(unsafe.Pointer(in.Spec))
-	if err := Convert_v1alpha1_SubscriptionStatus_To_operators_SubscriptionStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1alpha1_Subscription_To_operators_Subscription is an autogenerated conversion function.
-func Convert_v1alpha1_Subscription_To_operators_Subscription(in *Subscription, out *operators.Subscription, s conversion.Scope) error {
-	return autoConvert_v1alpha1_Subscription_To_operators_Subscription(in, out, s)
-}
-
-func autoConvert_operators_Subscription_To_v1alpha1_Subscription(in *operators.Subscription, out *Subscription, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	out.Spec = (*SubscriptionSpec)(unsafe.Pointer(in.Spec))
-	if err := Convert_operators_SubscriptionStatus_To_v1alpha1_SubscriptionStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_operators_Subscription_To_v1alpha1_Subscription is an autogenerated conversion function.
-func Convert_operators_Subscription_To_v1alpha1_Subscription(in *operators.Subscription, out *Subscription, s conversion.Scope) error {
-	return autoConvert_operators_Subscription_To_v1alpha1_Subscription(in, out, s)
-}
-
-func autoConvert_v1alpha1_SubscriptionCatalogHealth_To_operators_SubscriptionCatalogHealth(in *SubscriptionCatalogHealth, out *operators.SubscriptionCatalogHealth, s conversion.Scope) error {
-	out.CatalogSourceRef = (*v1.ObjectReference)(unsafe.Pointer(in.CatalogSourceRef))
-	out.LastUpdated = (*metav1.Time)(unsafe.Pointer(in.LastUpdated))
-	out.Healthy = in.Healthy
-	return nil
-}
-
-// Convert_v1alpha1_SubscriptionCatalogHealth_To_operators_SubscriptionCatalogHealth is an autogenerated conversion function.
-func Convert_v1alpha1_SubscriptionCatalogHealth_To_operators_SubscriptionCatalogHealth(in *SubscriptionCatalogHealth, out *operators.SubscriptionCatalogHealth, s conversion.Scope) error {
-	return autoConvert_v1alpha1_SubscriptionCatalogHealth_To_operators_SubscriptionCatalogHealth(in, out, s)
-}
-
-func autoConvert_operators_SubscriptionCatalogHealth_To_v1alpha1_SubscriptionCatalogHealth(in *operators.SubscriptionCatalogHealth, out *SubscriptionCatalogHealth, s conversion.Scope) error {
-	out.CatalogSourceRef = (*v1.ObjectReference)(unsafe.Pointer(in.CatalogSourceRef))
-	out.LastUpdated = (*metav1.Time)(unsafe.Pointer(in.LastUpdated))
-	out.Healthy = in.Healthy
-	return nil
-}
-
-// Convert_operators_SubscriptionCatalogHealth_To_v1alpha1_SubscriptionCatalogHealth is an autogenerated conversion function.
-func Convert_operators_SubscriptionCatalogHealth_To_v1alpha1_SubscriptionCatalogHealth(in *operators.SubscriptionCatalogHealth, out *SubscriptionCatalogHealth, s conversion.Scope) error {
-	return autoConvert_operators_SubscriptionCatalogHealth_To_v1alpha1_SubscriptionCatalogHealth(in, out, s)
-}
-
-func autoConvert_v1alpha1_SubscriptionCondition_To_operators_SubscriptionCondition(in *SubscriptionCondition, out *operators.SubscriptionCondition, s conversion.Scope) error {
-	out.Type = operators.SubscriptionConditionType(in.Type)
-	out.Status = v1.ConditionStatus(in.Status)
-	out.Reason = in.Reason
-	out.Message = in.Message
-	out.LastHeartbeatTime = (*metav1.Time)(unsafe.Pointer(in.LastHeartbeatTime))
-	out.LastTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastTransitionTime))
-	return nil
-}
-
-// Convert_v1alpha1_SubscriptionCondition_To_operators_SubscriptionCondition is an autogenerated conversion function.
-func Convert_v1alpha1_SubscriptionCondition_To_operators_SubscriptionCondition(in *SubscriptionCondition, out *operators.SubscriptionCondition, s conversion.Scope) error {
-	return autoConvert_v1alpha1_SubscriptionCondition_To_operators_SubscriptionCondition(in, out, s)
-}
-
-func autoConvert_operators_SubscriptionCondition_To_v1alpha1_SubscriptionCondition(in *operators.SubscriptionCondition, out *SubscriptionCondition, s conversion.Scope) error {
-	out.Type = SubscriptionConditionType(in.Type)
-	out.Status = v1.ConditionStatus(in.Status)
-	out.Reason = in.Reason
-	out.Message = in.Message
-	out.LastHeartbeatTime = (*metav1.Time)(unsafe.Pointer(in.LastHeartbeatTime))
-	out.LastTransitionTime = (*metav1.Time)(unsafe.Pointer(in.LastTransitionTime))
-	return nil
-}
-
-// Convert_operators_SubscriptionCondition_To_v1alpha1_SubscriptionCondition is an autogenerated conversion function.
-func Convert_operators_SubscriptionCondition_To_v1alpha1_SubscriptionCondition(in *operators.SubscriptionCondition, out *SubscriptionCondition, s conversion.Scope) error {
-	return autoConvert_operators_SubscriptionCondition_To_v1alpha1_SubscriptionCondition(in, out, s)
-}
-
-func autoConvert_v1alpha1_SubscriptionConfig_To_operators_SubscriptionConfig(in *SubscriptionConfig, out *operators.SubscriptionConfig, s conversion.Scope) error {
-	out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
-	out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
-	out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations))
-	out.Resources = in.Resources
-	out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom))
-	out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env))
-	out.Volumes = *(*[]v1.Volume)(unsafe.Pointer(&in.Volumes))
-	out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
-	return nil
-}
-
-// Convert_v1alpha1_SubscriptionConfig_To_operators_SubscriptionConfig is an autogenerated conversion function.
-func Convert_v1alpha1_SubscriptionConfig_To_operators_SubscriptionConfig(in *SubscriptionConfig, out *operators.SubscriptionConfig, s conversion.Scope) error {
-	return autoConvert_v1alpha1_SubscriptionConfig_To_operators_SubscriptionConfig(in, out, s)
-}
-
-func autoConvert_operators_SubscriptionConfig_To_v1alpha1_SubscriptionConfig(in *operators.SubscriptionConfig, out *SubscriptionConfig, s conversion.Scope) error {
-	out.Selector = (*metav1.LabelSelector)(unsafe.Pointer(in.Selector))
-	out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
-	out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations))
-	out.Resources = in.Resources
-	out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom))
-	out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env))
-	out.Volumes = *(*[]v1.Volume)(unsafe.Pointer(&in.Volumes))
-	out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts))
-	return nil
-}
-
-// Convert_operators_SubscriptionConfig_To_v1alpha1_SubscriptionConfig is an autogenerated conversion function.
-func Convert_operators_SubscriptionConfig_To_v1alpha1_SubscriptionConfig(in *operators.SubscriptionConfig, out *SubscriptionConfig, s conversion.Scope) error {
-	return autoConvert_operators_SubscriptionConfig_To_v1alpha1_SubscriptionConfig(in, out, s)
-}
-
-func autoConvert_v1alpha1_SubscriptionList_To_operators_SubscriptionList(in *SubscriptionList, out *operators.SubscriptionList, s conversion.Scope) error {
-	out.ListMeta = in.ListMeta
-	out.Items = *(*[]operators.Subscription)(unsafe.Pointer(&in.Items))
-	return nil
-}
-
-// Convert_v1alpha1_SubscriptionList_To_operators_SubscriptionList is an autogenerated conversion function.
-func Convert_v1alpha1_SubscriptionList_To_operators_SubscriptionList(in *SubscriptionList, out *operators.SubscriptionList, s conversion.Scope) error {
-	return autoConvert_v1alpha1_SubscriptionList_To_operators_SubscriptionList(in, out, s)
-}
-
-func autoConvert_operators_SubscriptionList_To_v1alpha1_SubscriptionList(in *operators.SubscriptionList, out *SubscriptionList, s conversion.Scope) error {
-	out.ListMeta = in.ListMeta
-	out.Items = *(*[]Subscription)(unsafe.Pointer(&in.Items))
-	return nil
-}
-
-// Convert_operators_SubscriptionList_To_v1alpha1_SubscriptionList is an autogenerated conversion function.
-func Convert_operators_SubscriptionList_To_v1alpha1_SubscriptionList(in *operators.SubscriptionList, out *SubscriptionList, s conversion.Scope) error {
-	return autoConvert_operators_SubscriptionList_To_v1alpha1_SubscriptionList(in, out, s)
-}
-
-func autoConvert_v1alpha1_SubscriptionSpec_To_operators_SubscriptionSpec(in *SubscriptionSpec, out *operators.SubscriptionSpec, s conversion.Scope) error {
-	out.CatalogSource = in.CatalogSource
-	out.CatalogSourceNamespace = in.CatalogSourceNamespace
-	out.Package = in.Package
-	out.Channel = in.Channel
-	out.StartingCSV = in.StartingCSV
-	out.InstallPlanApproval = operators.Approval(in.InstallPlanApproval)
-	if err := Convert_v1alpha1_SubscriptionConfig_To_operators_SubscriptionConfig(&in.Config, &out.Config, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1alpha1_SubscriptionSpec_To_operators_SubscriptionSpec is an autogenerated conversion function.
-func Convert_v1alpha1_SubscriptionSpec_To_operators_SubscriptionSpec(in *SubscriptionSpec, out *operators.SubscriptionSpec, s conversion.Scope) error {
-	return autoConvert_v1alpha1_SubscriptionSpec_To_operators_SubscriptionSpec(in, out, s)
-}
-
-func autoConvert_operators_SubscriptionSpec_To_v1alpha1_SubscriptionSpec(in *operators.SubscriptionSpec, out *SubscriptionSpec, s conversion.Scope) error {
-	out.CatalogSource = in.CatalogSource
-	out.CatalogSourceNamespace = in.CatalogSourceNamespace
-	out.Package = in.Package
-	out.Channel = in.Channel
-	out.StartingCSV = in.StartingCSV
-	out.InstallPlanApproval = Approval(in.InstallPlanApproval)
-	if err := Convert_operators_SubscriptionConfig_To_v1alpha1_SubscriptionConfig(&in.Config, &out.Config, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_operators_SubscriptionSpec_To_v1alpha1_SubscriptionSpec is an autogenerated conversion function.
-func Convert_operators_SubscriptionSpec_To_v1alpha1_SubscriptionSpec(in *operators.SubscriptionSpec, out *SubscriptionSpec, s conversion.Scope) error {
-	return autoConvert_operators_SubscriptionSpec_To_v1alpha1_SubscriptionSpec(in, out, s)
-}
-
-func autoConvert_v1alpha1_SubscriptionStatus_To_operators_SubscriptionStatus(in *SubscriptionStatus, out *operators.SubscriptionStatus, s conversion.Scope) error {
-	out.CurrentCSV = in.CurrentCSV
-	out.InstalledCSV = in.InstalledCSV
-	out.Install = (*operators.InstallPlanReference)(unsafe.Pointer(in.Install))
-	out.State = operators.SubscriptionState(in.State)
-	out.Reason = operators.ConditionReason(in.Reason)
-	out.InstallPlanRef = (*v1.ObjectReference)(unsafe.Pointer(in.InstallPlanRef))
-	out.CatalogHealth = *(*[]operators.SubscriptionCatalogHealth)(unsafe.Pointer(&in.CatalogHealth))
-	out.Conditions = *(*[]operators.SubscriptionCondition)(unsafe.Pointer(&in.Conditions))
-	out.LastUpdated = in.LastUpdated
-	return nil
-}
-
-// Convert_v1alpha1_SubscriptionStatus_To_operators_SubscriptionStatus is an autogenerated conversion function.
-func Convert_v1alpha1_SubscriptionStatus_To_operators_SubscriptionStatus(in *SubscriptionStatus, out *operators.SubscriptionStatus, s conversion.Scope) error {
-	return autoConvert_v1alpha1_SubscriptionStatus_To_operators_SubscriptionStatus(in, out, s)
-}
-
-func autoConvert_operators_SubscriptionStatus_To_v1alpha1_SubscriptionStatus(in *operators.SubscriptionStatus, out *SubscriptionStatus, s conversion.Scope) error {
-	out.CurrentCSV = in.CurrentCSV
-	out.InstalledCSV = in.InstalledCSV
-	out.Install = (*InstallPlanReference)(unsafe.Pointer(in.Install))
-	out.State = SubscriptionState(in.State)
-	out.Reason = ConditionReason(in.Reason)
-	out.InstallPlanRef = (*v1.ObjectReference)(unsafe.Pointer(in.InstallPlanRef))
-	out.CatalogHealth = *(*[]SubscriptionCatalogHealth)(unsafe.Pointer(&in.CatalogHealth))
-	out.Conditions = *(*[]SubscriptionCondition)(unsafe.Pointer(&in.Conditions))
-	out.LastUpdated = in.LastUpdated
-	return nil
-}
-
-// Convert_operators_SubscriptionStatus_To_v1alpha1_SubscriptionStatus is an autogenerated conversion function.
-func Convert_operators_SubscriptionStatus_To_v1alpha1_SubscriptionStatus(in *operators.SubscriptionStatus, out *SubscriptionStatus, s conversion.Scope) error {
-	return autoConvert_operators_SubscriptionStatus_To_v1alpha1_SubscriptionStatus(in, out, s)
-}
-
-func autoConvert_v1alpha1_UpdateStrategy_To_operators_UpdateStrategy(in *UpdateStrategy, out *operators.UpdateStrategy, s conversion.Scope) error {
-	out.RegistryPoll = (*operators.RegistryPoll)(unsafe.Pointer(in.RegistryPoll))
-	return nil
-}
-
-// Convert_v1alpha1_UpdateStrategy_To_operators_UpdateStrategy is an autogenerated conversion function.
-func Convert_v1alpha1_UpdateStrategy_To_operators_UpdateStrategy(in *UpdateStrategy, out *operators.UpdateStrategy, s conversion.Scope) error {
-	return autoConvert_v1alpha1_UpdateStrategy_To_operators_UpdateStrategy(in, out, s)
-}
-
-func autoConvert_operators_UpdateStrategy_To_v1alpha1_UpdateStrategy(in *operators.UpdateStrategy, out *UpdateStrategy, s conversion.Scope) error {
-	out.RegistryPoll = (*RegistryPoll)(unsafe.Pointer(in.RegistryPoll))
-	return nil
-}
-
-// Convert_operators_UpdateStrategy_To_v1alpha1_UpdateStrategy is an autogenerated conversion function.
-func Convert_operators_UpdateStrategy_To_v1alpha1_UpdateStrategy(in *operators.UpdateStrategy, out *UpdateStrategy, s conversion.Scope) error {
-	return autoConvert_operators_UpdateStrategy_To_v1alpha1_UpdateStrategy(in, out, s)
-}
diff --git a/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/pkg/operators/v1alpha1/zz_generated.deepcopy.go
index 4f8d90a94..ff26a7fd2 100644
--- a/pkg/operators/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/operators/v1alpha1/zz_generated.deepcopy.go
@@ -1,7 +1,6 @@
 // +build !ignore_autogenerated
 
 /*
-Copyright 2020 Red Hat, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -16,23 +15,21 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by controller-gen. DO NOT EDIT.
 
 package v1alpha1
 
 import (
-	json "encoding/json"
-
-	v1 "k8s.io/api/core/v1"
+	"encoding/json"
+	corev1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *APIResourceReference) DeepCopyInto(out *APIResourceReference) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceReference.
@@ -62,7 +59,6 @@ func (in *APIServiceDefinitions) DeepCopyInto(out *APIServiceDefinitions) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDefinitions.
@@ -104,7 +100,6 @@ func (in *APIServiceDescription) DeepCopyInto(out *APIServiceDescription) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDescription.
@@ -127,14 +122,9 @@ func (in *ActionDescriptor) DeepCopyInto(out *ActionDescriptor) {
 	}
 	if in.Value != nil {
 		in, out := &in.Value, &out.Value
-		*out = new(json.RawMessage)
-		if **in != nil {
-			in, out := *in, *out
-			*out = make([]byte, len(*in))
-			copy(*out, *in)
-		}
+		*out = make(json.RawMessage, len(*in))
+		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDescriptor.
@@ -150,7 +140,6 @@ func (in *ActionDescriptor) DeepCopy() *ActionDescriptor {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *AppLink) DeepCopyInto(out *AppLink) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppLink.
@@ -168,7 +157,7 @@ func (in *BundleLookup) DeepCopyInto(out *BundleLookup) {
 	*out = *in
 	if in.CatalogSourceRef != nil {
 		in, out := &in.CatalogSourceRef, &out.CatalogSourceRef
-		*out = new(v1.ObjectReference)
+		*out = new(corev1.ObjectReference)
 		**out = **in
 	}
 	if in.Conditions != nil {
@@ -178,7 +167,6 @@ func (in *BundleLookup) DeepCopyInto(out *BundleLookup) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookup.
@@ -202,7 +190,6 @@ func (in *BundleLookupCondition) DeepCopyInto(out *BundleLookupCondition) {
 		in, out := &in.LastTransitionTime, &out.LastTransitionTime
 		*out = (*in).DeepCopy()
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookupCondition.
@@ -244,7 +231,6 @@ func (in *CRDDescription) DeepCopyInto(out *CRDDescription) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDDescription.
@@ -264,7 +250,6 @@ func (in *CatalogSource) DeepCopyInto(out *CatalogSource) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource.
@@ -297,7 +282,6 @@ func (in *CatalogSourceList) DeepCopyInto(out *CatalogSourceList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceList.
@@ -332,7 +316,6 @@ func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) {
 		copy(*out, *in)
 	}
 	out.Icon = in.Icon
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceSpec.
@@ -367,7 +350,6 @@ func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) {
 		*out = new(GRPCConnectionState)
 		(*in).DeepCopyInto(*out)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceStatus.
@@ -387,7 +369,6 @@ func (in *ClusterServiceVersion) DeepCopyInto(out *ClusterServiceVersion) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersion.
@@ -419,7 +400,6 @@ func (in *ClusterServiceVersionCondition) DeepCopyInto(out *ClusterServiceVersio
 		in, out := &in.LastTransitionTime, &out.LastTransitionTime
 		*out = (*in).DeepCopy()
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionCondition.
@@ -444,7 +424,6 @@ func (in *ClusterServiceVersionList) DeepCopyInto(out *ClusterServiceVersionList
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionList.
@@ -474,7 +453,7 @@ func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec
 	in.APIServiceDefinitions.DeepCopyInto(&out.APIServiceDefinitions)
 	if in.NativeAPIs != nil {
 		in, out := &in.NativeAPIs, &out.NativeAPIs
-		*out = make([]metav1.GroupVersionKind, len(*in))
+		*out = make([]v1.GroupVersionKind, len(*in))
 		copy(*out, *in)
 	}
 	if in.Keywords != nil {
@@ -519,10 +498,9 @@ func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec
 	}
 	if in.Selector != nil {
 		in, out := &in.Selector, &out.Selector
-		*out = new(metav1.LabelSelector)
+		*out = new(v1.LabelSelector)
 		(*in).DeepCopyInto(*out)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionSpec.
@@ -568,7 +546,6 @@ func (in *ClusterServiceVersionStatus) DeepCopyInto(out *ClusterServiceVersionSt
 		in, out := &in.CertsRotateAt, &out.CertsRotateAt
 		*out = (*in).DeepCopy()
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionStatus.
@@ -585,7 +562,6 @@ func (in *ClusterServiceVersionStatus) DeepCopy() *ClusterServiceVersionStatus {
 func (in *ConfigMapResourceReference) DeepCopyInto(out *ConfigMapResourceReference) {
 	*out = *in
 	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapResourceReference.
@@ -615,7 +591,6 @@ func (in *CustomResourceDefinitions) DeepCopyInto(out *CustomResourceDefinitions
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitions.
@@ -631,7 +606,6 @@ func (in *CustomResourceDefinitions) DeepCopy() *CustomResourceDefinitions {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *DependentStatus) DeepCopyInto(out *DependentStatus) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependentStatus.
@@ -648,7 +622,6 @@ func (in *DependentStatus) DeepCopy() *DependentStatus {
 func (in *GRPCConnectionState) DeepCopyInto(out *GRPCConnectionState) {
 	*out = *in
 	in.LastConnectTime.DeepCopyInto(&out.LastConnectTime)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCConnectionState.
@@ -664,7 +637,6 @@ func (in *GRPCConnectionState) DeepCopy() *GRPCConnectionState {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Icon) DeepCopyInto(out *Icon) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Icon.
@@ -680,7 +652,6 @@ func (in *Icon) DeepCopy() *Icon {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *InstallMode) DeepCopyInto(out *InstallMode) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallMode.
@@ -701,7 +672,6 @@ func (in InstallModeSet) DeepCopyInto(out *InstallModeSet) {
 		for key, val := range *in {
 			(*out)[key] = val
 		}
-		return
 	}
 }
 
@@ -722,7 +692,6 @@ func (in *InstallPlan) DeepCopyInto(out *InstallPlan) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlan.
@@ -754,7 +723,6 @@ func (in *InstallPlanCondition) DeepCopyInto(out *InstallPlanCondition) {
 		in, out := &in.LastTransitionTime, &out.LastTransitionTime
 		*out = (*in).DeepCopy()
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanCondition.
@@ -779,7 +747,6 @@ func (in *InstallPlanList) DeepCopyInto(out *InstallPlanList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanList.
@@ -803,7 +770,6 @@ func (in *InstallPlanList) DeepCopyObject() runtime.Object {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *InstallPlanReference) DeepCopyInto(out *InstallPlanReference) {
 	*out = *in
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanReference.
@@ -824,7 +790,6 @@ func (in *InstallPlanSpec) DeepCopyInto(out *InstallPlanSpec) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanSpec.
@@ -872,10 +837,9 @@ func (in *InstallPlanStatus) DeepCopyInto(out *InstallPlanStatus) {
 	}
 	if in.AttenuatedServiceAccountRef != nil {
 		in, out := &in.AttenuatedServiceAccountRef, &out.AttenuatedServiceAccountRef
-		*out = new(v1.ObjectReference)
+		*out = new(corev1.ObjectReference)
 		**out = **in
 	}
-	return
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanStatus.
@@ -891,7 +855,6 @@ func (in *InstallPlanStatus) DeepCopy() *InstallPlanStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Maintainer) DeepCopyInto(out *Maintainer) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintainer. @@ -908,7 +871,6 @@ func (in *Maintainer) DeepCopy() *Maintainer { func (in *NamedInstallStrategy) DeepCopyInto(out *NamedInstallStrategy) { *out = *in in.StrategySpec.DeepCopyInto(&out.StrategySpec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedInstallStrategy. @@ -926,10 +888,9 @@ func (in *RegistryPoll) DeepCopyInto(out *RegistryPoll) { *out = *in if in.Interval != nil { in, out := &in.Interval, &out.Interval - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryPoll. @@ -946,7 +907,6 @@ func (in *RegistryPoll) DeepCopy() *RegistryPoll { func (in *RegistryServiceStatus) DeepCopyInto(out *RegistryServiceStatus) { *out = *in in.CreatedAt.DeepCopyInto(&out.CreatedAt) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryServiceStatus. @@ -967,7 +927,6 @@ func (in *RequirementStatus) DeepCopyInto(out *RequirementStatus) { *out = make([]DependentStatus, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequirementStatus. @@ -990,14 +949,9 @@ func (in *SpecDescriptor) DeepCopyInto(out *SpecDescriptor) { } if in.Value != nil { in, out := &in.Value, &out.Value - *out = new(json.RawMessage) - if **in != nil { - in, out := *in, *out - *out = make([]byte, len(*in)) - copy(*out, *in) - } + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecDescriptor. @@ -1020,14 +974,9 @@ func (in *StatusDescriptor) DeepCopyInto(out *StatusDescriptor) { } if in.Value != nil { in, out := &in.Value, &out.Value - *out = new(json.RawMessage) - if **in != nil { - in, out := *in, *out - *out = make([]byte, len(*in)) - copy(*out, *in) - } + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDescriptor. @@ -1044,7 +993,6 @@ func (in *StatusDescriptor) DeepCopy() *StatusDescriptor { func (in *Step) DeepCopyInto(out *Step) { *out = *in out.Resource = in.Resource - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step. @@ -1060,7 +1008,6 @@ func (in *Step) DeepCopy() *Step { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StepResource) DeepCopyInto(out *StepResource) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepResource. @@ -1083,7 +1030,6 @@ func (in *StrategyDeploymentPermissions) DeepCopyInto(out *StrategyDeploymentPer (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentPermissions. 
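The descriptor hunks also change how the optional `Value` field is copied: the old output went through a `*json.RawMessage` pointer, while the new output copies a `json.RawMessage` directly, consistent with the field dropping a level of indirection. Because `json.RawMessage` is just a nilable `[]byte`, `make` plus `copy` is a complete deep copy. A standalone sketch of the pattern (names are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// deepCopyRaw mirrors the generated pattern: json.RawMessage is []byte,
// so make+copy fully detaches the copy from the original.
func deepCopyRaw(in json.RawMessage) json.RawMessage {
	if in == nil {
		return nil
	}
	out := make(json.RawMessage, len(in))
	copy(out, in)
	return out
}

func main() {
	orig := json.RawMessage(`{"replicas":3}`)
	dup := deepCopyRaw(orig)
	dup[2] = 'X'              // mutate the copy...
	fmt.Println(string(orig)) // ...the original still prints {"replicas":3}
}
```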
@@ -1100,7 +1046,6 @@ func (in *StrategyDeploymentPermissions) DeepCopy() *StrategyDeploymentPermissio func (in *StrategyDeploymentSpec) DeepCopyInto(out *StrategyDeploymentSpec) { *out = *in in.Spec.DeepCopyInto(&out.Spec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentSpec. @@ -1137,7 +1082,6 @@ func (in *StrategyDetailsDeployment) DeepCopyInto(out *StrategyDetailsDeployment (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDetailsDeployment. @@ -1161,7 +1105,6 @@ func (in *Subscription) DeepCopyInto(out *Subscription) { (*in).DeepCopyInto(*out) } in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription. @@ -1187,14 +1130,13 @@ func (in *SubscriptionCatalogHealth) DeepCopyInto(out *SubscriptionCatalogHealth *out = *in if in.CatalogSourceRef != nil { in, out := &in.CatalogSourceRef, &out.CatalogSourceRef - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } if in.LastUpdated != nil { in, out := &in.LastUpdated, &out.LastUpdated *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCatalogHealth. @@ -1218,7 +1160,6 @@ func (in *SubscriptionCondition) DeepCopyInto(out *SubscriptionCondition) { in, out := &in.LastTransitionTime, &out.LastTransitionTime *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCondition. @@ -1236,7 +1177,7 @@ func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) + *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } if in.NodeSelector != nil { @@ -1248,7 +1189,7 @@ func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1256,33 +1197,32 @@ func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { in.Resources.DeepCopyInto(&out.Resources) if in.EnvFrom != nil { in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]v1.EnvFromSource, len(*in)) + *out = make([]corev1.EnvFromSource, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Env != nil { in, out := &in.Env, &out.Env - *out = make([]v1.EnvVar, len(*in)) + *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) + *out = make([]corev1.Volume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) + *out = make([]corev1.VolumeMount, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionConfig. 
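These hunks also show the two slice-copy shapes the generator chooses between: a plain `copy()` when the element type carries no reference fields (`[]string`, `[]v1.GroupVersionKind`), and an element-wise `DeepCopyInto` loop when it does (`[]corev1.EnvVar`, `[]corev1.Volume`). A toy illustration of why the distinction matters:

```go
package main

import "fmt"

type flat struct{ Name string } // value-only: copy() is already deep

type nested struct{ Tags []string } // holds a slice: needs element-wise copying

func (in *nested) deepCopyInto(out *nested) {
	*out = *in
	if in.Tags != nil {
		out.Tags = make([]string, len(in.Tags))
		copy(out.Tags, in.Tags)
	}
}

func main() {
	a := []flat{{"x"}}
	b := make([]flat, len(a))
	copy(b, a) // fully independent: flat has no references

	c := []nested{{Tags: []string{"t"}}}
	d := make([]nested, len(c))
	for i := range c {
		c[i].deepCopyInto(&d[i])
	}
	d[0].Tags[0] = "changed"
	fmt.Println(c[0].Tags[0]) // "t" — still independent
}
```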
@@ -1307,7 +1247,6 @@ func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList. @@ -1332,7 +1271,6 @@ func (in *SubscriptionList) DeepCopyObject() runtime.Object { func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) { *out = *in in.Config.DeepCopyInto(&out.Config) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec. @@ -1355,7 +1293,7 @@ func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { } if in.InstallPlanRef != nil { in, out := &in.InstallPlanRef, &out.InstallPlanRef - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } if in.CatalogHealth != nil { @@ -1373,7 +1311,6 @@ func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { } } in.LastUpdated.DeepCopyInto(&out.LastUpdated) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus. @@ -1394,7 +1331,6 @@ func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) { *out = new(RegistryPoll) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy. diff --git a/pkg/operators/v2alpha1/groupversion_info.go b/pkg/operators/v2alpha1/groupversion_info.go new file mode 100644 index 000000000..613d9ab90 --- /dev/null +++ b/pkg/operators/v2alpha1/groupversion_info.go @@ -0,0 +1,34 @@ +/* +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v2alpha1 contains API Schema definitions for the discovery v2alpha1 API group. +// +kubebuilder:object:generate=true +// +groupName=operators.coreos.com +package v2alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. + GroupVersion = schema.GroupVersion{Group: "operators.coreos.com", Version: "v2alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/operators/v2alpha1/operator_types.go b/pkg/operators/v2alpha1/operator_types.go new file mode 100644 index 000000000..c5920298b --- /dev/null +++ b/pkg/operators/v2alpha1/operator_types.go @@ -0,0 +1,102 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// OperatorSpec defines the desired state of Operator.
+type OperatorSpec struct{}
+
+// OperatorStatus describes the observed state of an operator and its components.
+type OperatorStatus struct {
+	// Components describes resources that compose the operator.
+	// +optional
+	Components *Components `json:"components,omitempty"`
+}
+
+// ConditionType codifies a condition's type.
+type ConditionType string
+
+// Condition represents the latest available observations of a component's state.
+type Condition struct {
+	// Type of condition.
+	Type ConditionType `json:"type"`
+	// Status of the condition, one of True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status"`
+	// The reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty"`
+	// A human readable message indicating details about the transition.
+	// +optional
+	Message string `json:"message,omitempty"`
+	// Last time the condition was probed.
+	// +optional
+	LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
+}
+
+// Components tracks the resources that compose an operator.
+type Components struct {
+	// LabelSelector is a label query over a set of resources used to select the operator's components.
+	LabelSelector *metav1.LabelSelector `json:"labelSelector"`
+	// Refs are a set of references to the operator's component resources, selected with LabelSelector.
+	// +optional
+	Refs []RichReference `json:"refs,omitempty"`
+}
+
+// RichReference is a reference to a resource, enriched with its status conditions.
+type RichReference struct {
+	*corev1.ObjectReference `json:",inline"`
+	// Conditions represents the latest state of the component.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:scope=Cluster
+// +kubebuilder:storageversion
+
+// Operator represents a cluster operator.
+type Operator struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   OperatorSpec   `json:"spec,omitempty"`
+	Status OperatorStatus `json:"status,omitempty"`
+}
+
+// +genclient:nonNamespaced
+// +kubebuilder:object:root=true
+
+// OperatorList contains a list of Operators.
+type OperatorList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Operator `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&Operator{}, &OperatorList{})
+}
diff --git a/pkg/operators/v2alpha1/zz_generated.deepcopy.go b/pkg/operators/v2alpha1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..e8ca503cf
--- /dev/null
+++ b/pkg/operators/v2alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,197 @@
+// +build !ignore_autogenerated
+
+/*
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Components) DeepCopyInto(out *Components) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Refs != nil { + in, out := &in.Refs, &out.Refs + *out = make([]RichReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Components. +func (in *Components) DeepCopy() *Components { + if in == nil { + return nil + } + out := new(Components) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + if in.LastUpdateTime != nil { + in, out := &in.LastUpdateTime, &out.LastUpdateTime + *out = (*in).DeepCopy() + } + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Operator) DeepCopyInto(out *Operator) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Operator. +func (in *Operator) DeepCopy() *Operator { + if in == nil { + return nil + } + out := new(Operator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Operator) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorList) DeepCopyInto(out *OperatorList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Operator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorList. 
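Taken together, groupversion_info.go, operator_types.go, and these generated methods give consumers a scheme-registrable v2alpha1 API. A minimal end-to-end sketch, assuming the module's import path github.com/operator-framework/api; the object name, labels, and condition type below are illustrative, not taken from this patch:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"

	v2alpha1 "github.com/operator-framework/api/pkg/operators/v2alpha1"
)

func main() {
	// Register the v2alpha1 types, as a controller manager would at startup.
	scheme := runtime.NewScheme()
	if err := v2alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(v2alpha1.GroupVersion.WithKind("Operator"))) // true

	// Build an Operator whose status tracks one component selected by label.
	op := &v2alpha1.Operator{
		ObjectMeta: metav1.ObjectMeta{Name: "etcd"}, // illustrative name
		Status: v2alpha1.OperatorStatus{
			Components: &v2alpha1.Components{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": "etcd"}, // illustrative label
				},
				Refs: []v2alpha1.RichReference{{
					ObjectReference: &corev1.ObjectReference{
						Kind: "Deployment", Namespace: "operators", Name: "etcd-operator",
					},
					Conditions: []v2alpha1.Condition{{
						Type:   "Available", // illustrative condition type
						Status: corev1.ConditionTrue,
					}},
				}},
			},
		},
	}

	// The generated deep copies share no reference-typed state with the
	// original: mutating the copy's map leaves op untouched.
	dup := op.DeepCopy()
	dup.Status.Components.LabelSelector.MatchLabels["app"] = "changed"
	fmt.Println(op.Status.Components.LabelSelector.MatchLabels["app"]) // etcd
}
```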
+func (in *OperatorList) DeepCopy() *OperatorList { + if in == nil { + return nil + } + out := new(OperatorList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OperatorList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorSpec) DeepCopyInto(out *OperatorSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorSpec. +func (in *OperatorSpec) DeepCopy() *OperatorSpec { + if in == nil { + return nil + } + out := new(OperatorSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorStatus) DeepCopyInto(out *OperatorStatus) { + *out = *in + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = new(Components) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorStatus. +func (in *OperatorStatus) DeepCopy() *OperatorStatus { + if in == nil { + return nil + } + out := new(OperatorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RichReference) DeepCopyInto(out *RichReference) { + *out = *in + if in.ObjectReference != nil { + in, out := &in.ObjectReference, &out.ObjectReference + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RichReference. +func (in *RichReference) DeepCopy() *RichReference { + if in == nil { + return nil + } + out := new(RichReference) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/operators/zz_generated.deepcopy.go b/pkg/operators/zz_generated.deepcopy.go index f006514e7..8b8760842 100644 --- a/pkg/operators/zz_generated.deepcopy.go +++ b/pkg/operators/zz_generated.deepcopy.go @@ -1,7 +1,6 @@ // +build !ignore_autogenerated /* -Copyright 2020 Red Hat, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,23 +15,21 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by controller-gen. DO NOT EDIT. package operators import ( - json "encoding/json" - - v1 "k8s.io/api/core/v1" + "encoding/json" + corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *APIResourceReference) DeepCopyInto(out *APIResourceReference) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceReference. 
@@ -62,7 +59,6 @@ func (in *APIServiceDefinitions) DeepCopyInto(out *APIServiceDefinitions) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDefinitions. @@ -104,7 +100,6 @@ func (in *APIServiceDescription) DeepCopyInto(out *APIServiceDescription) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceDescription. @@ -127,14 +122,9 @@ func (in *ActionDescriptor) DeepCopyInto(out *ActionDescriptor) { } if in.Value != nil { in, out := &in.Value, &out.Value - *out = new(json.RawMessage) - if **in != nil { - in, out := *in, *out - *out = make([]byte, len(*in)) - copy(*out, *in) - } + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActionDescriptor. @@ -150,7 +140,6 @@ func (in *ActionDescriptor) DeepCopy() *ActionDescriptor { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AppLink) DeepCopyInto(out *AppLink) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppLink. @@ -168,7 +157,7 @@ func (in *BundleLookup) DeepCopyInto(out *BundleLookup) { *out = *in if in.CatalogSourceRef != nil { in, out := &in.CatalogSourceRef, &out.CatalogSourceRef - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } if in.Conditions != nil { @@ -178,7 +167,6 @@ func (in *BundleLookup) DeepCopyInto(out *BundleLookup) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookup. @@ -202,7 +190,6 @@ func (in *BundleLookupCondition) DeepCopyInto(out *BundleLookupCondition) { in, out := &in.LastTransitionTime, &out.LastTransitionTime *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BundleLookupCondition. @@ -244,7 +231,6 @@ func (in *CRDDescription) DeepCopyInto(out *CRDDescription) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDDescription. @@ -264,7 +250,6 @@ func (in *CatalogSource) DeepCopyInto(out *CatalogSource) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSource. @@ -297,7 +282,6 @@ func (in *CatalogSourceList) DeepCopyInto(out *CatalogSourceList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceList. @@ -332,7 +316,6 @@ func (in *CatalogSourceSpec) DeepCopyInto(out *CatalogSourceSpec) { copy(*out, *in) } out.Icon = in.Icon - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceSpec. @@ -367,7 +350,6 @@ func (in *CatalogSourceStatus) DeepCopyInto(out *CatalogSourceStatus) { in, out := &in.LatestImageRegistryPoll, &out.LatestImageRegistryPoll *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CatalogSourceStatus. 
@@ -387,7 +369,6 @@ func (in *ClusterServiceVersion) DeepCopyInto(out *ClusterServiceVersion) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersion. @@ -419,7 +400,6 @@ func (in *ClusterServiceVersionCondition) DeepCopyInto(out *ClusterServiceVersio in, out := &in.LastTransitionTime, &out.LastTransitionTime *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionCondition. @@ -444,7 +424,6 @@ func (in *ClusterServiceVersionList) DeepCopyInto(out *ClusterServiceVersionList (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionList. @@ -474,7 +453,7 @@ func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec in.APIServiceDefinitions.DeepCopyInto(&out.APIServiceDefinitions) if in.NativeAPIs != nil { in, out := &in.NativeAPIs, &out.NativeAPIs - *out = make([]metav1.GroupVersionKind, len(*in)) + *out = make([]v1.GroupVersionKind, len(*in)) copy(*out, *in) } if in.Keywords != nil { @@ -519,10 +498,9 @@ func (in *ClusterServiceVersionSpec) DeepCopyInto(out *ClusterServiceVersionSpec } if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) + *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionSpec. @@ -568,7 +546,6 @@ func (in *ClusterServiceVersionStatus) DeepCopyInto(out *ClusterServiceVersionSt in, out := &in.CertsRotateAt, &out.CertsRotateAt *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterServiceVersionStatus. @@ -585,7 +562,6 @@ func (in *ClusterServiceVersionStatus) DeepCopy() *ClusterServiceVersionStatus { func (in *ConfigMapResourceReference) DeepCopyInto(out *ConfigMapResourceReference) { *out = *in in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapResourceReference. @@ -615,7 +591,6 @@ func (in *CustomResourceDefinitions) DeepCopyInto(out *CustomResourceDefinitions (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomResourceDefinitions. @@ -631,7 +606,6 @@ func (in *CustomResourceDefinitions) DeepCopy() *CustomResourceDefinitions { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DependentStatus) DeepCopyInto(out *DependentStatus) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependentStatus. @@ -648,7 +622,6 @@ func (in *DependentStatus) DeepCopy() *DependentStatus { func (in *GRPCConnectionState) DeepCopyInto(out *GRPCConnectionState) { *out = *in in.LastConnectTime.DeepCopyInto(&out.LastConnectTime) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCConnectionState. @@ -664,7 +637,6 @@ func (in *GRPCConnectionState) DeepCopy() *GRPCConnectionState { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. func (in *Icon) DeepCopyInto(out *Icon) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Icon. @@ -680,7 +652,6 @@ func (in *Icon) DeepCopy() *Icon { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstallMode) DeepCopyInto(out *InstallMode) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallMode. @@ -701,7 +672,6 @@ func (in InstallModeSet) DeepCopyInto(out *InstallModeSet) { for key, val := range *in { (*out)[key] = val } - return } } @@ -722,7 +692,6 @@ func (in *InstallPlan) DeepCopyInto(out *InstallPlan) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlan. @@ -754,7 +723,6 @@ func (in *InstallPlanCondition) DeepCopyInto(out *InstallPlanCondition) { in, out := &in.LastTransitionTime, &out.LastTransitionTime *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanCondition. @@ -779,7 +747,6 @@ func (in *InstallPlanList) DeepCopyInto(out *InstallPlanList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanList. @@ -803,7 +770,6 @@ func (in *InstallPlanList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstallPlanReference) DeepCopyInto(out *InstallPlanReference) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanReference. @@ -824,7 +790,6 @@ func (in *InstallPlanSpec) DeepCopyInto(out *InstallPlanSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanSpec. @@ -872,10 +837,9 @@ func (in *InstallPlanStatus) DeepCopyInto(out *InstallPlanStatus) { } if in.AttenuatedServiceAccountRef != nil { in, out := &in.AttenuatedServiceAccountRef, &out.AttenuatedServiceAccountRef - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstallPlanStatus. @@ -891,7 +855,6 @@ func (in *InstallPlanStatus) DeepCopy() *InstallPlanStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Maintainer) DeepCopyInto(out *Maintainer) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Maintainer. @@ -908,7 +871,6 @@ func (in *Maintainer) DeepCopy() *Maintainer { func (in *NamedInstallStrategy) DeepCopyInto(out *NamedInstallStrategy) { *out = *in in.StrategySpec.DeepCopyInto(&out.StrategySpec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedInstallStrategy. 
@@ -928,7 +890,6 @@ func (in *OperatorGroup) DeepCopyInto(out *OperatorGroup) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroup. @@ -961,7 +922,6 @@ func (in *OperatorGroupList) DeepCopyInto(out *OperatorGroupList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupList. @@ -987,7 +947,7 @@ func (in *OperatorGroupSpec) DeepCopyInto(out *OperatorGroupSpec) { *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) + *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } if in.TargetNamespaces != nil { @@ -995,7 +955,6 @@ func (in *OperatorGroupSpec) DeepCopyInto(out *OperatorGroupSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupSpec. @@ -1018,14 +977,13 @@ func (in *OperatorGroupStatus) DeepCopyInto(out *OperatorGroupStatus) { } if in.ServiceAccountRef != nil { in, out := &in.ServiceAccountRef, &out.ServiceAccountRef - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } if in.LastUpdated != nil { in, out := &in.LastUpdated, &out.LastUpdated *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorGroupStatus. @@ -1043,10 +1001,9 @@ func (in *RegistryPoll) DeepCopyInto(out *RegistryPoll) { *out = *in if in.Interval != nil { in, out := &in.Interval, &out.Interval - *out = new(metav1.Duration) + *out = new(v1.Duration) **out = **in } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryPoll. @@ -1063,7 +1020,6 @@ func (in *RegistryPoll) DeepCopy() *RegistryPoll { func (in *RegistryServiceStatus) DeepCopyInto(out *RegistryServiceStatus) { *out = *in in.CreatedAt.DeepCopyInto(&out.CreatedAt) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryServiceStatus. @@ -1084,7 +1040,6 @@ func (in *RequirementStatus) DeepCopyInto(out *RequirementStatus) { *out = make([]DependentStatus, len(*in)) copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequirementStatus. @@ -1107,14 +1062,9 @@ func (in *SpecDescriptor) DeepCopyInto(out *SpecDescriptor) { } if in.Value != nil { in, out := &in.Value, &out.Value - *out = new(json.RawMessage) - if **in != nil { - in, out := *in, *out - *out = make([]byte, len(*in)) - copy(*out, *in) - } + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpecDescriptor. @@ -1137,14 +1087,9 @@ func (in *StatusDescriptor) DeepCopyInto(out *StatusDescriptor) { } if in.Value != nil { in, out := &in.Value, &out.Value - *out = new(json.RawMessage) - if **in != nil { - in, out := *in, *out - *out = make([]byte, len(*in)) - copy(*out, *in) - } + *out = make(json.RawMessage, len(*in)) + copy(*out, *in) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDescriptor. 
@@ -1161,7 +1106,6 @@ func (in *StatusDescriptor) DeepCopy() *StatusDescriptor { func (in *Step) DeepCopyInto(out *Step) { *out = *in out.Resource = in.Resource - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Step. @@ -1177,7 +1121,6 @@ func (in *Step) DeepCopy() *Step { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StepResource) DeepCopyInto(out *StepResource) { *out = *in - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepResource. @@ -1200,7 +1143,6 @@ func (in *StrategyDeploymentPermissions) DeepCopyInto(out *StrategyDeploymentPer (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentPermissions. @@ -1217,7 +1159,6 @@ func (in *StrategyDeploymentPermissions) DeepCopy() *StrategyDeploymentPermissio func (in *StrategyDeploymentSpec) DeepCopyInto(out *StrategyDeploymentSpec) { *out = *in in.Spec.DeepCopyInto(&out.Spec) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDeploymentSpec. @@ -1254,7 +1195,6 @@ func (in *StrategyDetailsDeployment) DeepCopyInto(out *StrategyDetailsDeployment (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StrategyDetailsDeployment. @@ -1278,7 +1218,6 @@ func (in *Subscription) DeepCopyInto(out *Subscription) { (*in).DeepCopyInto(*out) } in.Status.DeepCopyInto(&out.Status) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription. @@ -1304,14 +1243,13 @@ func (in *SubscriptionCatalogHealth) DeepCopyInto(out *SubscriptionCatalogHealth *out = *in if in.CatalogSourceRef != nil { in, out := &in.CatalogSourceRef, &out.CatalogSourceRef - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } if in.LastUpdated != nil { in, out := &in.LastUpdated, &out.LastUpdated *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCatalogHealth. @@ -1335,7 +1273,6 @@ func (in *SubscriptionCondition) DeepCopyInto(out *SubscriptionCondition) { in, out := &in.LastTransitionTime, &out.LastTransitionTime *out = (*in).DeepCopy() } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionCondition. 
@@ -1353,7 +1290,7 @@ func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { *out = *in if in.Selector != nil { in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) + *out = new(v1.LabelSelector) (*in).DeepCopyInto(*out) } if in.NodeSelector != nil { @@ -1365,7 +1302,7 @@ func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { } if in.Tolerations != nil { in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) + *out = make([]corev1.Toleration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1373,33 +1310,32 @@ func (in *SubscriptionConfig) DeepCopyInto(out *SubscriptionConfig) { in.Resources.DeepCopyInto(&out.Resources) if in.EnvFrom != nil { in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]v1.EnvFromSource, len(*in)) + *out = make([]corev1.EnvFromSource, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Env != nil { in, out := &in.Env, &out.Env - *out = make([]v1.EnvVar, len(*in)) + *out = make([]corev1.EnvVar, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.Volumes != nil { in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) + *out = make([]corev1.Volume, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.VolumeMounts != nil { in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) + *out = make([]corev1.VolumeMount, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionConfig. @@ -1424,7 +1360,6 @@ func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList. @@ -1449,7 +1384,6 @@ func (in *SubscriptionList) DeepCopyObject() runtime.Object { func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) { *out = *in in.Config.DeepCopyInto(&out.Config) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec. @@ -1472,7 +1406,7 @@ func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { } if in.InstallPlanRef != nil { in, out := &in.InstallPlanRef, &out.InstallPlanRef - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } if in.CatalogHealth != nil { @@ -1490,7 +1424,6 @@ func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) { } } in.LastUpdated.DeepCopyInto(&out.LastUpdated) - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus. @@ -1511,7 +1444,6 @@ func (in *UpdateStrategy) DeepCopyInto(out *UpdateStrategy) { *out = new(RegistryPoll) (*in).DeepCopyInto(*out) } - return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateStrategy. diff --git a/tools.go b/tools.go new file mode 100644 index 000000000..0fcf931eb --- /dev/null +++ b/tools.go @@ -0,0 +1,12 @@ +// +build tools + +package tools + +import ( + // Generate deepcopy and conversion. + _ "sigs.k8s.io/controller-tools/cmd/controller-gen" + // Manipulate YAML. + _ "github.com/mikefarah/yq/v2" + // Generate embedded files. 
+ _ "github.com/go-bindata/go-bindata/v3" +) diff --git a/vendor/github.com/fatih/color/.travis.yml b/vendor/github.com/fatih/color/.travis.yml new file mode 100644 index 000000000..95f8a1ff5 --- /dev/null +++ b/vendor/github.com/fatih/color/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.8.x + - tip + diff --git a/vendor/github.com/fatih/color/Gopkg.lock b/vendor/github.com/fatih/color/Gopkg.lock new file mode 100644 index 000000000..7d879e9ca --- /dev/null +++ b/vendor/github.com/fatih/color/Gopkg.lock @@ -0,0 +1,27 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/mattn/go-colorable" + packages = ["."] + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" + version = "v0.0.3" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/fatih/color/Gopkg.toml b/vendor/github.com/fatih/color/Gopkg.toml new file mode 100644 index 000000000..ff1617f71 --- /dev/null +++ b/vendor/github.com/fatih/color/Gopkg.toml @@ -0,0 +1,30 @@ + +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/mattn/go-colorable" + version = "0.0.9" + +[[constraint]] + name = "github.com/mattn/go-isatty" + version = "0.0.3" diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 000000000..25fdaf639 --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 000000000..3fc954460 --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,179 @@ +# Color [![GoDoc](https://godoc.org/github.com/fatih/color?status.svg)](https://godoc.org/github.com/fatih/color) [![Build Status](https://img.shields.io/travis/fatih/color.svg?style=flat-square)](https://travis-ci.org/fatih/color) + + + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + + +![Color](https://i.imgur.com/c1JI0lA.png) + + +## Install + +```bash +go get github.com/fatih/color +``` + +Note that the `vendor` folder is here for stability. Remove the folder if you +already have the dependencies in your GOPATH. + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! +red := color.New(color.FgRed) + +boldRed := red.Add(color.Bold) +boldRed.Println("This will print text in bold red.") + +whiteBackground := red.Add(color.BgWhite) +whiteBackground.Println("Red text with white background.") +``` + +### Use your own output (io.Writer) + +```go +// Use your own io.Writer output +color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + +blue := color.New(color.FgBlue) +blue.Fprint(writer, "This will print text in blue.") +``` + +### Custom print functions (PrintFunc) + +```go +// Create a custom print function for convenience +red := color.New(color.FgRed).PrintfFunc() +red("Warning") +red("Error: %s", err) + +// Mix up multiple attributes +notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() +notice("Don't forget this...") +``` + +### Custom fprint functions (FprintFunc) + +```go +blue := color.New(FgBlue).FprintfFunc() +blue(myWriter, "important notice: %s", stars) + +// Mix up with multiple attributes +success := color.New(color.Bold, color.FgGreen).FprintlnFunc() +success(myWriter, "Don't forget this...") +``` + +### Insert into noncolor strings (SprintFunc) + +```go +// Create SprintXxx functions to mix strings with other non-colorized strings: +yellow := color.New(color.FgYellow).SprintFunc() +red := color.New(color.FgRed).SprintFunc() +fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error")) + +info := color.New(color.FgWhite, color.BgGreen).SprintFunc() +fmt.Printf("This %s rocks!\n", info("package")) + +// Use helper functions +fmt.Println("This", color.RedString("warning"), "should be not neglected.") +fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.") + +// Windows supported too! 
Just don't forget to change the output to color.Output +fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) +``` + +### Plug into existing code + +```go +// Use handy standard colors +color.Set(color.FgYellow) + +fmt.Println("Existing text will now be in yellow") +fmt.Printf("This one %s\n", "too") + +color.Unset() // Don't forget to unset + +// You can mix up parameters +color.Set(color.FgMagenta, color.Bold) +defer color.Unset() // Use it in your function + +fmt.Println("All text will now be bold magenta.") +``` + +### Disable/Enable color + +There might be a case where you want to explicitly disable/enable color output. the +`go-isatty` package will automatically disable color output for non-tty output streams +(for example if the output were piped directly to `less`) + +`Color` has support to disable/enable colors both globally and for single color +definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You +can easily disable the color output with: + +```go + +var flagNoColor = flag.Bool("no-color", false, "Disable color output") + +if *flagNoColor { + color.NoColor = true // disables colorized output +} +``` + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details + diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 000000000..91c8e9f06 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,603 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. This is a global option and affects all colors. For more control + // over each color block use the methods DisableColor() individually. + NoColor = os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// Color defines a custom color object which is defined by SGR parameters. 
+type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. +func New(value ...Attribute) *Color { + c := &Color{params: make([]Attribute, 0)} + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. 
+// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). 
+func (c *Color) PrintlnFunc() func(a ...interface{}) {
+	return func(a ...interface{}) {
+		c.Println(a...)
+	}
+}
+
+// SprintFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprint(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output, example:
+//
+//	put := New(FgYellow).SprintFunc()
+//	fmt.Fprintf(color.Output, "This is a %s", put("warning"))
+func (c *Color) SprintFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprint(a...))
+	}
+}
+
+// SprintfFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintf(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
+	return func(format string, a ...interface{}) string {
+		return c.wrap(fmt.Sprintf(format, a...))
+	}
+}
+
+// SprintlnFunc returns a new function that returns colorized strings for the
+// given arguments with fmt.Sprintln(). Useful to put into or mix into other
+// strings. Windows users should use this in conjunction with color.Output.
+func (c *Color) SprintlnFunc() func(a ...interface{}) string {
+	return func(a ...interface{}) string {
+		return c.wrap(fmt.Sprintln(a...))
+	}
+}
+
+// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
+// escape sequence. An example output might be: "1;36" -> bold cyan.
+func (c *Color) sequence() string {
+	format := make([]string, len(c.params))
+	for i, v := range c.params {
+		format[i] = strconv.Itoa(int(v))
+	}
+
+	return strings.Join(format, ";")
+}
+
+// wrap wraps the string s with the color's attributes. The string is ready to
+// be printed.
+func (c *Color) wrap(s string) string {
+	if c.isNoColorSet() {
+		return s
+	}
+
+	return c.format() + s + c.unformat()
+}
+
+func (c *Color) format() string {
+	return fmt.Sprintf("%s[%sm", escape, c.sequence())
+}
+
+func (c *Color) unformat() string {
+	return fmt.Sprintf("%s[%dm", escape, Reset)
+}
+
+// DisableColor disables the color output. Useful for keeping existing code
+// unchanged while still controlling output, e.g. behind a flag like
+// "--no-color". To enable it back, use the EnableColor() method.
+func (c *Color) DisableColor() {
+	c.noColor = boolPtr(true)
+}
+
+// EnableColor enables the color output. Use it in conjunction with
+// DisableColor(). Otherwise this method has no side effects.
+func (c *Color) EnableColor() {
+	c.noColor = boolPtr(false)
+}
+
+func (c *Color) isNoColorSet() bool {
+	// check first if the user explicitly set a preference
+	if c.noColor != nil {
+		return *c.noColor
+	}
+
+	// if not, return the global option, which is disabled by default
+	return NoColor
+}
+
+// Equals returns a boolean value indicating whether two colors are equal.
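+// Two colors are considered equal when they contain the same set of
+// attributes, regardless of the order in which they were added.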
+func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) } + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) 
} + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) } + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) 
+}
+
+// HiBlueString is a convenient helper function to return a string with hi-intensity blue
+// foreground.
+func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
+
+// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
+// foreground.
+func HiMagentaString(format string, a ...interface{}) string {
+	return colorString(format, FgHiMagenta, a...)
+}
+
+// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
+// foreground.
+func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
+
+// HiWhiteString is a convenient helper function to return a string with hi-intensity white
+// foreground.
+func HiWhiteString(format string, a ...interface{}) string {
+	return colorString(format, FgHiWhite, a...)
+}
diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go
new file mode 100644
index 000000000..cf1e96500
--- /dev/null
+++ b/vendor/github.com/fatih/color/doc.go
@@ -0,0 +1,133 @@
+/*
+Package color is an ANSI color package to output colorized or SGR defined
+output to the standard output. The API can be used in several ways; pick one
+that suits you.
+
+Use simple and default helper functions with predefined foreground colors:
+
+	color.Cyan("Prints text in cyan.")
+
+	// a newline will be appended automatically
+	color.Blue("Prints %s in blue.", "text")
+
+	// More default foreground colors..
+	color.Red("We have red")
+	color.Yellow("Yellow color too!")
+	color.Magenta("And many others ..")
+
+	// Hi-intensity colors
+	color.HiGreen("Bright green color.")
+	color.HiBlack("Bright black means gray..")
+	color.HiWhite("Shiny white color!")
+
+However, there are times when custom color mixes are required. Below are some
+examples to create custom color objects and use the print functions of each
+separate color object.
+
+	// Create a new color object
+	c := color.New(color.FgCyan).Add(color.Underline)
+	c.Println("Prints cyan text with an underline.")
+
+	// Or just add them to New()
+	d := color.New(color.FgCyan, color.Bold)
+	d.Printf("This prints bold cyan %s\n", "too!.")
+
+
+	// Mix up foreground and background colors, create new mixes!
+	red := color.New(color.FgRed)
+
+	boldRed := red.Add(color.Bold)
+	boldRed.Println("This will print text in bold red.")
+
+	whiteBackground := red.Add(color.BgWhite)
+	whiteBackground.Println("Red text with White background.")
+
+	// Use your own io.Writer output
+	color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+	blue := color.New(color.FgBlue)
+	blue.Fprint(myWriter, "This will print text in blue.")
+
+You can create PrintXxx functions to simplify even more:
+
+	// Create a custom print function for convenience
+	red := color.New(color.FgRed).PrintfFunc()
+	red("warning")
+	red("error: %s", err)
+
+	// Mix up multiple attributes
+	notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+	notice("don't forget this...")
+
+You can also use FprintXxx functions to pass your own io.Writer:
+
+	blue := color.New(color.FgBlue).FprintfFunc()
+	blue(myWriter, "important notice: %s", stars)
+
+	// Mix up with multiple attributes
+	success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+	success(myWriter, "don't forget this...")
+
+Or create SprintXxx functions to mix strings with other non-colorized strings:
+
+	yellow := New(FgYellow).SprintFunc()
+	red := New(FgRed).SprintFunc()
+
+	fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+	info := New(FgWhite, BgGreen).SprintFunc()
+	fmt.Printf("this %s rocks!\n", info("package"))
+
+Windows support is enabled by default. All Print functions work as intended.
+However, for the color.SprintXXX functions, the user should use fmt.FprintXXX
+and set the output to color.Output:
+
+	fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+
+	info := New(FgWhite, BgGreen).SprintFunc()
+	fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
+
+Using with existing code is possible. Just use the Set() method to set the
+standard output to the given parameters. That way a rewrite of existing
+code is not required.
+
+	// Use handy standard colors.
+	color.Set(color.FgYellow)
+
+	fmt.Println("Existing text will be now in Yellow")
+	fmt.Printf("This one %s\n", "too")
+
+	color.Unset() // don't forget to unset
+
+	// You can mix up parameters
+	color.Set(color.FgMagenta, color.Bold)
+	defer color.Unset() // use it in your function
+
+	fmt.Println("All text will be now bold magenta.")
+
+There might be a case where you want to disable color output (for example to
+pipe the standard output of your app to somewhere else). `Color` has support to
+disable colors both globally and for single color definitions. For example,
+suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
+the color output with:
+
+	var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+	if *flagNoColor {
+		color.NoColor = true // disables colorized output
+	}
+
+It also has support for single color definitions (local). You can
+disable/enable color output on the fly:
+
+	c := color.New(color.FgCyan)
+	c.Println("Prints cyan text")
+
+	c.DisableColor()
+	c.Println("This is printed without any color")
+
+	c.EnableColor()
+	c.Println("This prints again cyan...")
+*/
+package color
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/.drone.yml b/vendor/github.com/go-bindata/go-bindata/v3/.drone.yml
new file mode 100644
index 000000000..cdd4af10a
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/.drone.yml
@@ -0,0 +1,13 @@
+kind: pipeline
+name: default
+
+workspace:
+  base: /go
+  path: github.com/go-bindata/go-bindata
+steps:
+- name: build
+  image: golang:1.12
+  commands:
+  - go get -u honnef.co/go/tools/cmd/staticcheck
+  - staticcheck ./...
+  - make
\ No newline at end of file
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/.gitignore b/vendor/github.com/go-bindata/go-bindata/v3/.gitignore
new file mode 100644
index 000000000..b9abe353d
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/.gitignore
@@ -0,0 +1,16 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# GoLand project files
+.idea/
+*.iml
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/CONTRIBUTING.md b/vendor/github.com/go-bindata/go-bindata/v3/CONTRIBUTING.md
new file mode 100644
index 000000000..e0732f54e
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/CONTRIBUTING.md
@@ -0,0 +1,79 @@
+## Contribution guidelines.
+
+So you wish to contribute to this project? Fantastic!
+Here are a few guidelines to help you do this in a
+streamlined fashion.
+
+
+## Bug reports
+
+When supplying a bug report, please consider the following guidelines.
+These serve to make it easier for us to address the issue and find a solution.
+Most of these are pretty self-evident, but sometimes it is still necessary
+to reiterate them.
+
+* Be clear in the way you express the problem. Use simple language and
+  just enough of it to clearly define the issue. Not everyone is a native
+  English speaker. And while most can handle themselves pretty well,
+  it helps to stay away from more esoteric vocabulary.
+
+  Be patient with non-native English speakers. If their bug reports
+  or comments are hard to understand, just ask for clarification.
+  Do not start guessing at their meaning, as this may just lead to
+  more confusion and misunderstandings.
+* Clearly define any information which is relevant to the problem.
+  This includes library versions, operating system and any other
+  external dependencies which may be needed.
+* Where applicable, provide a step-by-step listing of the way to
+  reproduce the problem. Make sure this is the simplest possible
+  way to do so. Omit any and all unnecessary steps, because they may
+  just complicate our understanding of the real problem.
+  If need be, create a whole new code project on your local machine,
+  which specifically tries to create the problem you are running into;
+  nothing more, nothing less.
+
+  Include this program in the bug report. It often suffices to paste
+  the code in a [Gist](https://gist.github.com) or on the
+  [Go playground](http://play.golang.org).
+* If possible, provide us with a listing of the steps you have already
+  undertaken to solve the problem. This can save us a great deal of
+  wasted time, trying out solutions you have already covered.
+
+
+## Pull requests
+
+Bug reports are great. Supplying fixes to bugs is even better.
+When submitting a pull request, the following guidelines are
+good to keep in mind:
+
+* `go fmt`: **Always** run your code through `go fmt` before
+  committing it. Code has to be readable by many different
+  people. And the only way this will be as painless as possible
+  is if we all stick to the same code style.
+
+  Some of our projects may have automated build-servers hooked up
+  to commit hooks. These will vet any submitted code and determine
+  if it meets a set of properties. One of which is code formatting.
+  These servers will outright deny a submission which has not been
+  run through `go fmt`, even if the code itself is correct.
+
+  We try to maintain a zero-tolerance policy on this matter,
+  because consistently formatted code makes life a great deal
+  easier for everyone involved.
+* Commit log messages: When committing changes, do so often and
+  clearly -- even if you have changed only 1 character in a code
+  comment. This means that commit log messages should clearly state
+  exactly what the change does and why. If it fixes a known issue,
+  then mention the issue number in the commit log. E.g.:
+
+  > Fixes return value for `foo/boo.Baz()` to be consistent with
+  > the rest of the API. This addresses issue #32
+
+  Do not pile a lot of unrelated changes into a single commit.
+  Pick and choose only those changes for a single commit, which are
+  directly related. We would much rather see a hundred commits
+  saying nothing but `"Runs go fmt"` in between any real fixes
+  than have these style changes embedded in those real fixes.
+  It creates a lot of noise when trying to review code.
+
+
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/LICENSE b/vendor/github.com/go-bindata/go-bindata/v3/LICENSE
new file mode 100644
index 000000000..c07a9311f
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/LICENSE
@@ -0,0 +1,3 @@
+This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+license. Its contents can be found at:
+http://creativecommons.org/publicdomain/zero/1.0
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/Makefile b/vendor/github.com/go-bindata/go-bindata/v3/Makefile
new file mode 100644
index 000000000..84b661cb2
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/Makefile
@@ -0,0 +1,2 @@
+all:
+	make -C testdata
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/README.md b/vendor/github.com/go-bindata/go-bindata/v3/README.md
new file mode 100644
index 000000000..62bed5e7e
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/README.md
@@ -0,0 +1,201 @@
+## bindata
+[![Build Status](https://cloud.drone.io/api/badges/go-bindata/go-bindata/status.svg)](https://cloud.drone.io/go-bindata/go-bindata)
+[![Go Report Card](https://goreportcard.com/badge/github.com/go-bindata/bindata)](https://goreportcard.com/report/github.com/go-bindata/bindata)
+
+This package converts any file into manageable Go source code. Useful for
+embedding binary data into a Go program. The file data is optionally gzip
+compressed before being converted to a raw byte slice.
+
+It comes with a command line tool in the `go-bindata` subdirectory.
+This tool offers a set of command line options, used to customize the
+output being generated.
+
+
+### Installation
+
+To install the library and command line program, use the following:
+
+    go get -u github.com/go-bindata/go-bindata/...
+
+
+### Usage
+
+Conversion is done on one or more sets of files. They are all embedded in a new
+Go source file, along with a table of contents and an `Asset` function,
+which allows quick access to the asset based on its name.
+
+The simplest invocation generates a `bindata.go` file in the current
+working directory. It includes all assets from the `data` directory.
+
+    $ go-bindata data/
+
+To include all input sub-directories recursively, use the ellipsis postfix
+as defined for Go import paths. Otherwise it will only consider assets in the
+input directory itself.
+
+    $ go-bindata data/...
+
+To specify the name of the output file being generated, we use the following:
+
+    $ go-bindata -o myfile.go data/
+
+Multiple input directories can be specified if necessary.
+
+    $ go-bindata dir1/... /path/to/dir2/... dir3
+
+
+The following paragraphs detail some of the command line options which can be
+supplied to `go-bindata`. Refer to the `testdata/out` directory for various
+output examples from the assets in `testdata/in`. Each example uses different
+command line options.
+
+To ignore files, pass in regexes using -ignore, for example:
+
+    $ go-bindata -ignore=\\.gitignore data/...
+
+### Accessing an asset
+
+To access asset data, we use the `Asset(string) ([]byte, error)` function which
+is included in the generated output.
+
+```go
+data, err := Asset("pub/style/foo.css")
+if err != nil {
+	// Asset was not found.
+}
+
+// use asset data
+```
+
+
+### Debug vs Release builds
+
+When invoking the program with the `-debug` flag, the generated code does
+not actually include the asset data. Instead, it generates function stubs
+which load the data from the original file on disk. The asset API remains
+identical between debug and release builds, so your code will not have to
+change.
+
+This is useful during development when you expect the assets to change often.
+The host application using these assets uses the same API in both cases and
+will not have to care where the actual data comes from.
+
+An example is a Go webserver with some embedded, static web content like
+HTML, JS and CSS files. While developing it, you do not want to rebuild the
+whole server and restart it every time you make a change to a bit of
+JavaScript. You just want to build and launch the server once. Then just press
+refresh in the browser to see those changes. Embedding the assets with the
+`debug` flag allows you to do just that. When you are finished developing and
+ready for deployment, just re-invoke `go-bindata` without the `-debug` flag.
+It will now embed the latest version of the assets.
+
+
+### Lower memory footprint
+
+Using the `-nomemcopy` flag will alter the way the output file is generated.
+It will employ a hack that allows us to read the file data directly from
+the compiled program's `.rodata` section. This ensures that when we
+call our generated function, we omit unnecessary memcopies.
+
+The downside of this is that it requires dependencies on the `reflect` and
+`unsafe` packages. These may be restricted on platforms like AppEngine and
+thus prevent you from using this mode.
+
+Another disadvantage is that the byte slice we create is strictly read-only.
+For most use-cases this is not a problem, but if you ever try to alter the
+returned byte slice, a runtime panic is thrown. Use this mode only on target
+platforms where memory constraints are an issue.
+
+The default behaviour is to use the old code generation method. This
+prevents the two previously mentioned issues, but will employ at least one
+extra memcopy and thus increase memory requirements.
+
+For instance, consider the following two examples:
+
+This would be the default mode, using an extra memcopy but gives a safe
+implementation without dependencies on `reflect` and `unsafe`:
+
+```go
+func myfile() []byte {
+	return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a}
+}
+```
+
+Here is the same functionality, but uses the `.rodata` hack.
+The byte slice returned from this example cannot be written to without
+generating a runtime error.
+
+```go
+var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a"
+
+func myfile() []byte {
+	var empty [0]byte
+	sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile))
+	b := empty[:]
+	bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	bx.Data = sx.Data
+	bx.Len = len(_myfile)
+	bx.Cap = bx.Len
+	return b
+}
+```
+
+
+### Optional compression
+
+When the `-nocompress` flag is given, the supplied resource is *not* GZIP
+compressed before being turned into Go code. The data should still be accessed
+through a function call, so nothing changes in the usage of the generated file.
+
+This feature is useful if you do not care for compression, or the supplied
+resource is already compressed. Doing it again would not add any value and may
+even increase the size of the data.
+
+The default behaviour of the program is to use compression.
+
+
+### Path prefix stripping
+
+The keys used in the `_bindata` map are the same as the input file name
+passed to `go-bindata`. This includes the path. In most cases, this is not
+desirable, as it puts potentially sensitive information in your code base.
+For this purpose, the tool supplies another command line flag `-prefix`.
+This accepts a portion of a path name, which should be stripped off from
+the map keys and function names.
+
+For example, running without the `-prefix` flag, we get:
+
+    $ go-bindata /path/to/templates/
+
+    _bindata["/path/to/templates/foo.html"] = path_to_templates_foo_html
+
+Running with the `-prefix` flag, we get:
+
+    $ go-bindata -prefix "/path/to/" /path/to/templates/
+
+    _bindata["templates/foo.html"] = templates_foo_html
+
+
+### Build tags
+
+With the optional `-tags` flag, you can specify any go build tags that
+must be fulfilled for the output file to be included in a build. This
+is useful when including binary data in multiple formats, where the desired
+format is specified at build time with the appropriate tags.
+
+The tags are appended to a `// +build` line in the beginning of the output file
+and must follow the build tags syntax specified by the go tool.
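+
+As a sketch of how this can look in practice (the tag and file names here are
+hypothetical), you might generate two variants of the same assets and let the
+build tags pick one at compile time:
+
+    $ go-bindata -tags debug -debug -o bindata_debug.go data/
+    $ go-bindata -tags '!debug' -o bindata_release.go data/
+
+A plain `go build` then embeds the release data, while `go build -tags debug`
+compiles the disk-loading debug stubs instead.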
+
+### Serve assets with `net/http`
+
+With the `-fs` flag, `go-bindata` will add an `AssetFile()` function returning an `http.FileSystem` interface:
+
+    $ go-bindata -fs -prefix "static/" static/
+
+Use the `-prefix` flag to strip the first-level directory; then, in your `net/http` router, you can use `AssetFile()` with `http.FileServer()` like:
+
+```go
+mux := http.NewServeMux()
+mux.Handle("/static", http.FileServer(AssetFile()))
+http.ListenAndServe(":8080", mux)
+```
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/_config.yml b/vendor/github.com/go-bindata/go-bindata/v3/_config.yml
new file mode 100644
index 000000000..c4192631f
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/_config.yml
@@ -0,0 +1 @@
+theme: jekyll-theme-cayman
\ No newline at end of file
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/asset.go b/vendor/github.com/go-bindata/go-bindata/v3/asset.go
new file mode 100644
index 000000000..95b6b94f3
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/asset.go
@@ -0,0 +1,12 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+// Asset holds information about a single asset to be processed.
+type Asset struct {
+	Path string // Full file path.
+	Name string // Key used in TOC -- name by which asset is referenced.
+	Func string // Function name for the procedure returning the asset contents.
+}
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/bytewriter.go b/vendor/github.com/go-bindata/go-bindata/v3/bytewriter.go
new file mode 100644
index 000000000..05d6d6780
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/bytewriter.go
@@ -0,0 +1,44 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+	"fmt"
+	"io"
+)
+
+var (
+	newline    = []byte{'\n'}
+	dataindent = []byte{'\t', '\t'}
+	space      = []byte{' '}
+)
+
+type ByteWriter struct {
+	io.Writer
+	c int
+}
+
+func (w *ByteWriter) Write(p []byte) (n int, err error) {
+	if len(p) == 0 {
+		return
+	}
+
+	for n = range p {
+		if w.c%12 == 0 {
+			w.Writer.Write(newline)
+			w.Writer.Write(dataindent)
+			w.c = 0
+		} else {
+			w.Writer.Write(space)
+		}
+
+		fmt.Fprintf(w.Writer, "0x%02x,", p[n])
+		w.c++
+	}
+
+	n++
+
+	return
+}
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/config.go b/vendor/github.com/go-bindata/go-bindata/v3/config.go
new file mode 100644
index 000000000..8a3a5f602
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/config.go
@@ -0,0 +1,209 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+)
+
+// InputConfig defines options on an asset directory to be converted.
+type InputConfig struct {
+	// Path defines a directory containing asset files to be included
+	// in the generated output.
+	Path string
+
+	// Recursive defines whether subdirectories of Path
+	// should be recursively included in the conversion.
+	Recursive bool
+}
+
+// Config defines a set of options for the asset conversion.
+type Config struct {
+	// Name of the package to use. Defaults to 'main'.
+	Package string
+
+	// Tags specify a set of optional build tags, which should be
+	// included in the generated output. The tags are appended to a
+	// `// +build` line in the beginning of the output file
+	// and must follow the build tags syntax specified by the go tool.
+	Tags string
+
+	// Input defines the directory paths containing the asset files, as
+	// well as whether to recursively process assets in any subdirectories.
+	Input []InputConfig
+
+	// Output defines the output file for the generated code.
+	// If left empty, this defaults to 'bindata.go' in the current
+	// working directory.
+	Output string
+
+	// Prefix defines a path prefix which should be stripped from all
+	// file names when generating the keys in the table of contents.
+	// For example, running without the `-prefix` flag, we get:
+	//
+	// 	$ go-bindata /path/to/templates
+	// 	go_bindata["/path/to/templates/foo.html"] = _path_to_templates_foo_html
+	//
+	// Running with the `-prefix` flag, we get:
+	//
+	// 	$ go-bindata -prefix "/path/to/" /path/to/templates/foo.html
+	// 	go_bindata["templates/foo.html"] = templates_foo_html
+	Prefix string
+
+	// NoMemCopy will alter the way the output file is generated.
+	//
+	// It will employ a hack that allows us to read the file data directly from
+	// the compiled program's `.rodata` section. This ensures that when we
+	// call our generated function, we omit unnecessary memcopies.
+	//
+	// The downside of this is that it requires dependencies on the `reflect` and
+	// `unsafe` packages. These may be restricted on platforms like AppEngine and
+	// thus prevent you from using this mode.
+	//
+	// Another disadvantage is that the byte slice we create is strictly read-only.
+	// For most use-cases this is not a problem, but if you ever try to alter the
+	// returned byte slice, a runtime panic is thrown. Use this mode only on target
+	// platforms where memory constraints are an issue.
+	//
+	// The default behaviour is to use the old code generation method. This
+	// prevents the two previously mentioned issues, but will employ at least one
+	// extra memcopy and thus increase memory requirements.
+	//
+	// For instance, consider the following two examples:
+	//
+	// This would be the default mode, using an extra memcopy but gives a safe
+	// implementation without dependencies on `reflect` and `unsafe`:
+	//
+	// 	func myfile() []byte {
+	// 		return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a}
+	// 	}
+	//
+	// Here is the same functionality, but uses the `.rodata` hack.
+	// The byte slice returned from this example cannot be written to without
+	// generating a runtime error.
+	//
+	// 	var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a"
+	//
+	// 	func myfile() []byte {
+	// 		var empty [0]byte
+	// 		sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile))
+	// 		b := empty[:]
+	// 		bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	// 		bx.Data = sx.Data
+	// 		bx.Len = len(_myfile)
+	// 		bx.Cap = bx.Len
+	// 		return b
+	// 	}
+	NoMemCopy bool
+
+	// NoCompress means the assets are /not/ GZIP compressed before being turned
+	// into Go code. The generated function will automatically unzip
+	// the file data when called. Defaults to false.
+	NoCompress bool
+
+	// HttpFileSystem specifies whether to generate an AssetFile() function that
+	// exposes the embedded assets through an http.FileSystem interface. When
+	// true, the related code is generated.
+	HttpFileSystem bool
+
+	// Perform a debug build. This generates an asset file, which
+	// loads the asset contents directly from disk at their original
+	// location, instead of embedding the contents in the code.
+	//
+	// This is mostly useful if you anticipate that the assets are
+	// going to change during your development cycle. You will always
+	// want your code to access the latest version of the asset.
+	// Only in release mode will the assets actually be embedded
+	// in the code. The default behaviour is Release mode.
+	Debug bool
+
+	// Perform a dev build, which is nearly identical to the debug option. The
+	// only difference is that instead of absolute file paths in generated code,
+	// it expects a variable, `rootDir`, to be set in the generated code's
+	// package (the author needs to do this manually), which it then prepends to
+	// an asset's name to construct the file path on disk.
+	//
+	// This is mainly so you can push the generated code file to a shared
+	// repository.
+	Dev bool
+
+	// When true, size, mode and modtime are not preserved from files.
+	NoMetadata bool
+	// When nonzero, use this as mode for all files.
+	Mode uint
+	// When nonzero, use this as unix timestamp for all files.
+	ModTime int64
+
+	// Ignores any filenames matching the regex pattern specified, e.g.
+	// path/to/file.ext will ignore only that file, or \\.gitignore
+	// will match any .gitignore file.
+	//
+	// This parameter can be provided multiple times.
+	Ignore []*regexp.Regexp
+}
+
+// NewConfig returns a default configuration struct.
+func NewConfig() *Config {
+	c := new(Config)
+	c.Package = "main"
+	c.NoMemCopy = false
+	c.NoCompress = false
+	c.HttpFileSystem = false
+	c.Debug = false
+	c.Output = "./bindata.go"
+	c.Ignore = make([]*regexp.Regexp, 0)
+	return c
+}
+
+// validate ensures the config has sane values.
+// Part of this means checking whether certain file/directory paths exist.
+func (c *Config) validate() error {
+	if len(c.Package) == 0 {
+		return fmt.Errorf("missing package name")
+	}
+
+	for _, input := range c.Input {
+		_, err := os.Lstat(input.Path)
+		if err != nil {
+			return fmt.Errorf("failed to stat input path '%s': %v", input.Path, err)
+		}
+	}
+
+	if len(c.Output) == 0 {
+		cwd, err := os.Getwd()
+		if err != nil {
+			return fmt.Errorf("unable to determine current working directory")
+		}
+
+		c.Output = filepath.Join(cwd, "bindata.go")
+	}
+
+	stat, err := os.Lstat(c.Output)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return fmt.Errorf("output path: %v", err)
+		}
+
+		// File does not exist. This is fine, just make
+		// sure the directory it is to be in exists.
+		dir, _ := filepath.Split(c.Output)
+		if dir != "" {
+			err = os.MkdirAll(dir, 0744)
+			if err != nil {
+				return fmt.Errorf("create output directory: %v", err)
+			}
+		}
+	}
+
+	if stat != nil && stat.IsDir() {
+		return fmt.Errorf("output path is a directory")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/convert.go b/vendor/github.com/go-bindata/go-bindata/v3/convert.go
new file mode 100644
index 000000000..c8ed8ca5b
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/convert.go
@@ -0,0 +1,261 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"sort"
+	"strings"
+	"unicode"
+)
+
+// Translate reads assets from an input directory, converts them
+// to Go code and writes new files to the output specified
+// in the given configuration.
+func Translate(c *Config) error {
+	var toc []Asset
+
+	// Ensure our configuration has sane values.
+	err := c.validate()
+	if err != nil {
+		return err
+	}
+
+	var knownFuncs = make(map[string]int)
+	var visitedPaths = make(map[string]bool)
+	// Locate all the assets.
+	for _, input := range c.Input {
+		err = findFiles(input.Path, c.Prefix, input.Recursive, &toc, c.Ignore, knownFuncs, visitedPaths)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Create output file.
+	fd, err := os.Create(c.Output)
+	if err != nil {
+		return err
+	}
+
+	defer fd.Close()
+
+	// Create a buffered writer for better performance.
+	bfd := bufio.NewWriter(fd)
+	defer bfd.Flush()
+
+	// Write the header. This makes e.g. GitHub ignore diffs in generated files.
+	if _, err = fmt.Fprintf(bfd, "// Code generated by go-bindata. (@generated) DO NOT EDIT.\n\n// Package %s generated by go-bindata.\n", c.Package); err != nil {
+		return err
+	}
+	if _, err = fmt.Fprint(bfd, "// sources:\n"); err != nil {
+		return err
+	}
+
+	wd, err := os.Getwd()
+	if err != nil {
+		return err
+	}
+
+	for _, asset := range toc {
+		relative, _ := filepath.Rel(wd, asset.Path)
+		if _, err = fmt.Fprintf(bfd, "// %s\n", filepath.ToSlash(relative)); err != nil {
+			return err
+		}
+	}
+	//if _, err = fmt.Fprint(bfd, "// DO NOT EDIT!\n\n"); err != nil {
+	//	return err
+	//}
+
+	// Write build tags, if applicable.
+	if len(c.Tags) > 0 {
+		if _, err = fmt.Fprintf(bfd, "// +build %s\n\n", c.Tags); err != nil {
+			return err
+		}
+	}
+
+	// Write package declaration.
+	_, err = fmt.Fprintf(bfd, "package %s\n\n", c.Package)
+	if err != nil {
+		return err
+	}
+
+	// Write assets.
+	if c.Debug || c.Dev {
+		err = writeDebug(bfd, c, toc)
+	} else {
+		err = writeRelease(bfd, c, toc)
+	}
+
+	if err != nil {
+		return err
+	}
+
+	// Write table of contents
+	if err := writeTOC(bfd, toc); err != nil {
+		return err
+	}
+	// Write hierarchical tree of assets
+	if err := writeTOCTree(bfd, toc); err != nil {
+		return err
+	}
+
+	// Write restore procedure
+	return writeRestore(bfd)
+}
+
+// ByName implements sort.Interface for []os.FileInfo based on Name()
+type ByName []os.FileInfo
+
+func (v ByName) Len() int           { return len(v) }
+func (v ByName) Swap(i, j int)      { v[i], v[j] = v[j], v[i] }
+func (v ByName) Less(i, j int) bool { return v[i].Name() < v[j].Name() }
+
+// findFiles recursively finds all the file paths in the given directory tree.
+// They are appended to the given table of contents, along with safe function
+// names for each file, which will be used when generating the output code.
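+// Previously visited symlink targets are recorded in visitedPaths, so that
+// following symlinks cannot recurse forever on a cycle.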
+func findFiles(dir, prefix string, recursive bool, toc *[]Asset, ignore []*regexp.Regexp, knownFuncs map[string]int, visitedPaths map[string]bool) error {
+	dirpath := dir
+	if len(prefix) > 0 {
+		dirpath, _ = filepath.Abs(dirpath)
+		prefix, _ = filepath.Abs(prefix)
+		prefix = filepath.ToSlash(prefix)
+	}
+
+	fi, err := os.Stat(dirpath)
+	if err != nil {
+		return err
+	}
+
+	var list []os.FileInfo
+
+	if !fi.IsDir() {
+		dirpath = filepath.Dir(dirpath)
+		list = []os.FileInfo{fi}
+	} else {
+		visitedPaths[dirpath] = true
+		fd, err := os.Open(dirpath)
+		if err != nil {
+			return err
+		}
+
+		defer fd.Close()
+
+		list, err = fd.Readdir(0)
+		if err != nil {
+			return err
+		}
+
+		// Sort to make output stable between invocations
+		sort.Sort(ByName(list))
+	}
+
+	for _, file := range list {
+		var asset Asset
+		asset.Path = filepath.Join(dirpath, file.Name())
+		asset.Name = filepath.ToSlash(asset.Path)
+
+		ignoring := false
+		for _, re := range ignore {
+			if re.MatchString(asset.Path) {
+				ignoring = true
+				break
+			}
+		}
+		if ignoring {
+			continue
+		}
+
+		if file.IsDir() {
+			if recursive {
+				recursivePath := filepath.Join(dir, file.Name())
+				visitedPaths[asset.Path] = true
+				if err = findFiles(recursivePath, prefix, recursive, toc, ignore, knownFuncs, visitedPaths); err != nil {
+					return err
+				}
+			}
+			continue
+		} else if file.Mode()&os.ModeSymlink == os.ModeSymlink {
+			var linkPath string
+			if linkPath, err = os.Readlink(asset.Path); err != nil {
+				return err
+			}
+			if !filepath.IsAbs(linkPath) {
+				if linkPath, err = filepath.Abs(dirpath + "/" + linkPath); err != nil {
+					return err
+				}
+			}
+			if _, ok := visitedPaths[linkPath]; !ok {
+				visitedPaths[linkPath] = true
+				if err = findFiles(asset.Path, prefix, recursive, toc, ignore, knownFuncs, visitedPaths); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+
+		if strings.HasPrefix(asset.Name, prefix) {
+			asset.Name = asset.Name[len(prefix):]
+		} else {
+			asset.Name = filepath.Join(dir, file.Name())
+		}
+
+		// If we have a leading slash, get rid of it.
+		if len(asset.Name) > 0 && asset.Name[0] == '/' {
+			asset.Name = asset.Name[1:]
+		}
+
+		// This shouldn't happen.
+		if len(asset.Name) == 0 {
+			return fmt.Errorf("invalid file: %v", asset.Path)
+		}
+
+		asset.Func = safeFunctionName(asset.Name, knownFuncs)
+		asset.Path, _ = filepath.Abs(asset.Path)
+		*toc = append(*toc, asset)
+	}
+
+	return nil
+}
+
+var regFuncName = regexp.MustCompile(`[^a-zA-Z0-9_]`)
+
+// safeFunctionName converts the given name into a name
+// which qualifies as a valid function identifier. It
+// also compares against a known list of functions to
+// prevent conflict based on name translation.
+func safeFunctionName(name string, knownFuncs map[string]int) string {
+	var inBytes, outBytes []byte
+	var toUpper bool
+
+	name = strings.ToLower(name)
+	inBytes = []byte(name)
+
+	for i := 0; i < len(inBytes); i++ {
+		if regFuncName.Match([]byte{inBytes[i]}) {
+			toUpper = true
+		} else if toUpper {
+			outBytes = append(outBytes, []byte(strings.ToUpper(string(inBytes[i])))...)
+			toUpper = false
+		} else {
+			outBytes = append(outBytes, inBytes[i])
+		}
+	}
+
+	name = string(outBytes)
+
+	// Identifier can't start with a digit.
+	if unicode.IsDigit(rune(name[0])) {
+		name = "_" + name
+	}
+
+	if num, ok := knownFuncs[name]; ok {
+		knownFuncs[name] = num + 1
+		name = fmt.Sprintf("%s%d", name, num)
+	} else {
+		knownFuncs[name] = 2
+	}
+
+	return name
+}
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/debug.go b/vendor/github.com/go-bindata/go-bindata/v3/debug.go
new file mode 100644
index 000000000..8587f877f
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/debug.go
@@ -0,0 +1,146 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+	"fmt"
+	"io"
+)
+
+// writeDebug writes the debug code file.
+func writeDebug(w io.Writer, c *Config, toc []Asset) error {
+	err := writeDebugHeader(w, c)
+	if err != nil {
+		return err
+	}
+
+	err = writeAssetFS(w, c)
+	if err != nil {
+		return err
+	}
+
+	for i := range toc {
+		err = writeDebugAsset(w, c, &toc[i])
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeDebugHeader writes output file headers.
+// This targets debug builds.
+func writeDebugHeader(w io.Writer, c *Config) error {
+	var header string
+
+	if c.HttpFileSystem {
+		header = `import (
+	"bytes"
+	"net/http"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"`
+	} else {
+		header = `import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"`
+	}
+
+	_, err := fmt.Fprintf(w, `%s
+)
+
+// bindataRead reads the given file from disk. It returns an error on failure.
+func bindataRead(path, name string) ([]byte, error) {
+	buf, err := ioutil.ReadFile(path)
+	if err != nil {
+		err = fmt.Errorf("Error reading asset %%s at %%s: %%v", name, path, err)
+	}
+	return buf, err
+}
+
+type asset struct {
+	bytes []byte
+	info  os.FileInfo
+}
+
+type bindataFileInfo struct {
+	name    string
+	size    int64
+	mode    os.FileMode
+	modTime time.Time
+}
+
+// Name returns the file name
+func (fi bindataFileInfo) Name() string {
+	return fi.name
+}
+
+// Size returns the file size
+func (fi bindataFileInfo) Size() int64 {
+	return fi.size
+}
+
+// Mode returns the file mode
+func (fi bindataFileInfo) Mode() os.FileMode {
+	return fi.mode
+}
+
+// ModTime returns the file modification time
+func (fi bindataFileInfo) ModTime() time.Time {
+	return fi.modTime
+}
+
+// IsDir returns whether the file is a directory
+func (fi bindataFileInfo) IsDir() bool {
+	return fi.mode&os.ModeDir != 0
+}
+
+// Sys returns the underlying data source (always nil)
+func (fi bindataFileInfo) Sys() interface{} {
+	return nil
+}
+
+`, header)
+	return err
+}
+
+// writeDebugAsset writes a debug entry for the given asset.
+// A debug entry is simply a function which reads the asset from
+// the original file (e.g.: from disk).
+func writeDebugAsset(w io.Writer, c *Config, asset *Asset) error {
+	pathExpr := fmt.Sprintf("%q", asset.Path)
+	if c.Dev {
+		pathExpr = fmt.Sprintf("filepath.Join(rootDir, %q)", asset.Name)
+	}
+
+	_, err := fmt.Fprintf(w, `// %s reads file data from disk. It returns an error on failure.
+func %s() (*asset, error) {
+	path := %s
+	name := %q
+	bytes, err := bindataRead(path, name)
+	if err != nil {
+		return nil, err
+	}
+
+	fi, err := os.Stat(path)
+	if err != nil {
+		err = fmt.Errorf("Error reading asset info %%s at %%s: %%v", name, path, err)
+	}
+
+	a := &asset{bytes: bytes, info: fi}
+	return a, err
+}
+
+`, asset.Func, asset.Func, pathExpr, asset.Name)
+	return err
+}
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/doc.go b/vendor/github.com/go-bindata/go-bindata/v3/doc.go
new file mode 100644
index 000000000..aa8aaf62c
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/doc.go
@@ -0,0 +1,129 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+/*
+bindata converts any file into manageable Go source code. Useful for
+embedding binary data into a Go program. The file data is optionally gzip
+compressed before being converted to a raw byte slice.
+
+The following paragraphs cover some of the customization options
+which can be specified in the Config struct, which must be passed into
+the Translate() call.
+
+
+Debug vs Release builds
+
+When used with the `Debug` option, the generated code does not actually include
+the asset data. Instead, it generates function stubs which load the data from
+the original file on disk. The asset API remains identical between debug and
+release builds, so your code will not have to change.
+
+This is useful during development when you expect the assets to change often.
+The host application using these assets uses the same API in both cases and
+will not have to care where the actual data comes from.
+
+An example is a Go webserver with some embedded, static web content like
+HTML, JS and CSS files. While developing it, you do not want to rebuild the
+whole server and restart it every time you make a change to a bit of
+JavaScript. You just want to build and launch the server once. Then just press
+refresh in the browser to see those changes. Embedding the assets with the
+`debug` flag allows you to do just that. When you are finished developing and
+ready for deployment, just re-invoke `go-bindata` without the `-debug` flag.
+It will now embed the latest version of the assets.
+
+
+Lower memory footprint
+
+The `NoMemCopy` option will alter the way the output file is generated.
+It will employ a hack that allows us to read the file data directly from
+the compiled program's `.rodata` section. This ensures that when we call
+our generated function, we omit unnecessary memcopies.
+
+The downside of this is that it requires dependencies on the `reflect` and
+`unsafe` packages. These may be restricted on platforms like AppEngine and
+thus prevent you from using this mode.
+
+Another disadvantage is that the byte slice we create is strictly read-only.
+For most use-cases this is not a problem, but if you ever try to alter the
+returned byte slice, a runtime panic is thrown. Use this mode only on target
+platforms where memory constraints are an issue.
+
+The default behaviour is to use the old code generation method. This
+prevents the two previously mentioned issues, but will employ at least one
+extra memcopy and thus increase memory requirements.
+ +For instance, consider the following two examples: + +This would be the default mode, using an extra memcopy but gives a safe +implementation without dependencies on `reflect` and `unsafe`: + + func myfile() []byte { + return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a} + } + +Here is the same functionality, but uses the `.rodata` hack. +The byte slice returned from this example can not be written to without +generating a runtime error. + + var _myfile = "\x89\x50\x4e\x47\x0d\x0a\x1a" + + func myfile() []byte { + var empty [0]byte + sx := (*reflect.StringHeader)(unsafe.Pointer(&_myfile)) + b := empty[:] + bx := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bx.Data = sx.Data + bx.Len = len(_myfile) + bx.Cap = bx.Len + return b + } + + +Optional compression + +The NoCompress option indicates that the supplied assets are *not* GZIP +compressed before being turned into Go code. The data should still be accessed +through a function call, so nothing changes in the API. + +This feature is useful if you do not care for compression, or the supplied +resource is already compressed. Doing it again would not add any value and may +even increase the size of the data. + +The default behaviour of the program is to use compression. + + +Path prefix stripping + +The keys used in the `_bindata` map are the same as the input file name +passed to `go-bindata`. This includes the path. In most cases, this is not +desirable, as it puts potentially sensitive information in your code base. +For this purpose, the tool supplies another command line flag `-prefix`. +This accepts a portion of a path name, which should be stripped off from +the map keys and function names. + +For example, running without the `-prefix` flag, we get: + + $ go-bindata /path/to/templates/ + + _bindata["/path/to/templates/foo.html"] = path_to_templates_foo_html + +Running with the `-prefix` flag, we get: + + $ go-bindata -prefix "/path/to/" /path/to/templates/ + + _bindata["templates/foo.html"] = templates_foo_html + + +Build tags + +With the optional Tags field, you can specify any go build tags that +must be fulfilled for the output file to be included in a build. This +is useful when including binary data in multiple formats, where the desired +format is specified at build time with the appropriate tags. + +The tags are appended to a `// +build` line in the beginning of the output file +and must follow the build tags syntax specified by the go tool. 
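+
+As an illustrative sketch (the package name, tag, and paths here are made up),
+a programmatic invocation that emits a tag-guarded output file could look like:
+
+	cfg := bindata.NewConfig()
+	cfg.Package = "assets"
+	cfg.Tags = "embed"
+	cfg.Input = []bindata.InputConfig{{Path: "data", Recursive: true}}
+	cfg.Output = "assets/bindata.go"
+	if err := bindata.Translate(cfg); err != nil {
+		log.Fatal(err)
+	}
+
+The generated file then starts with a "// +build embed" line and only takes
+part in builds that pass -tags embed to the go tool.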
+
+*/
+package bindata
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/file.go b/vendor/github.com/go-bindata/go-bindata/v3/file.go
new file mode 100644
index 000000000..8c83926c8
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/file.go
@@ -0,0 +1,102 @@
+package bindata
+
+import (
+	"fmt"
+	"io"
+)
+
+func writeAssetFS(w io.Writer, c *Config) error {
+	if !c.HttpFileSystem {
+		return nil
+	}
+
+	_, err := fmt.Fprintf(w, `
+type assetFile struct {
+	*bytes.Reader
+	name            string
+	childInfos      []os.FileInfo
+	childInfoOffset int
+}
+
+type assetOperator struct{}
+
+// Open implements the http.FileSystem interface
+func (f *assetOperator) Open(name string) (http.File, error) {
+	var err error
+	if len(name) > 0 && name[0] == '/' {
+		name = name[1:]
+	}
+	content, err := Asset(name)
+	if err == nil {
+		return &assetFile{name: name, Reader: bytes.NewReader(content)}, nil
+	}
+	children, err := AssetDir(name)
+	if err == nil {
+		childInfos := make([]os.FileInfo, 0, len(children))
+		for _, child := range children {
+			childPath := filepath.Join(name, child)
+			info, errInfo := AssetInfo(childPath)
+			if errInfo == nil {
+				childInfos = append(childInfos, info)
+			} else {
+				childInfos = append(childInfos, newDirFileInfo(childPath))
+			}
+		}
+		return &assetFile{name: name, childInfos: childInfos}, nil
+	} else {
+		// If the asset was not found, return os.ErrNotExist so the
+		// server responds with a 404 error instead of a 500 error.
+		if strings.Contains(err.Error(), "not found") {
+			return nil, os.ErrNotExist
+		}
+		return nil, err
+	}
+}
+
+// Close is a no-op
+func (f *assetFile) Close() error {
+	return nil
+}
+
+// Readdir reads the directory's child file infos
+func (f *assetFile) Readdir(count int) ([]os.FileInfo, error) {
+	if len(f.childInfos) == 0 {
+		return nil, os.ErrNotExist
+	}
+	if count <= 0 {
+		return f.childInfos, nil
+	}
+	if f.childInfoOffset+count > len(f.childInfos) {
+		count = len(f.childInfos) - f.childInfoOffset
+	}
+	offset := f.childInfoOffset
+	f.childInfoOffset += count
+	return f.childInfos[offset : offset+count], nil
+}
+
+// Stat returns the file info for the asset
+func (f *assetFile) Stat() (os.FileInfo, error) {
+	if len(f.childInfos) != 0 {
+		return newDirFileInfo(f.name), nil
+	}
+	return AssetInfo(f.name)
+}
+
+// newDirFileInfo returns a default directory file info
+func newDirFileInfo(name string) os.FileInfo {
+	return &bindataFileInfo{
+		name:    name,
+		size:    0,
+		mode:    os.FileMode(2147484068), // equal os.FileMode(0644)|os.ModeDir
+		modTime: time.Time{}}
+}
+
+// AssetFile returns an http.FileSystem instance backed by the embedded assets
+func AssetFile() http.FileSystem {
+	return &assetOperator{}
+}
+
+`)
+	return err
+}
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/go.mod b/vendor/github.com/go-bindata/go-bindata/v3/go.mod
new file mode 100644
index 000000000..560ba0504
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/go.mod
@@ -0,0 +1,8 @@
+module github.com/go-bindata/go-bindata/v3
+
+go 1.12
+
+require (
+	github.com/kisielk/errcheck v1.2.0
+	golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f
+)
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/go.sum b/vendor/github.com/go-bindata/go-bindata/v3/go.sum
new file mode 100644
index 000000000..d7cfc7240
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/go.sum
@@ -0,0 +1,13 @@
+github.com/kisielk/errcheck v1.2.0 h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/go-bindata/go-bindata/v3/release.go b/vendor/github.com/go-bindata/go-bindata/v3/release.go
new file mode 100644
index 000000000..6b72dde7b
--- /dev/null
+++ b/vendor/github.com/go-bindata/go-bindata/v3/release.go
@@ -0,0 +1,473 @@
+// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication
+// license. Its contents can be found at:
+// http://creativecommons.org/publicdomain/zero/1.0/
+
+package bindata
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"unicode/utf8"
+)
+
+// writeRelease writes the release code file.
+func writeRelease(w io.Writer, c *Config, toc []Asset) error {
+	err := writeReleaseHeader(w, c)
+	if err != nil {
+		return err
+	}
+
+	err = writeAssetFS(w, c)
+	if err != nil {
+		return err
+	}
+
+	for i := range toc {
+		err = writeReleaseAsset(w, c, &toc[i])
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeReleaseHeader writes output file headers.
+// This targets release builds.
+func writeReleaseHeader(w io.Writer, c *Config) error {
+	var err error
+	if c.NoCompress {
+		if c.NoMemCopy {
+			err = header_uncompressed_nomemcopy(w, c)
+		} else {
+			err = header_uncompressed_memcopy(w, c)
+		}
+	} else {
+		if c.NoMemCopy {
+			err = header_compressed_nomemcopy(w, c)
+		} else {
+			err = header_compressed_memcopy(w, c)
+		}
+	}
+	if err != nil {
+		return err
+	}
+	return header_release_common(w)
+}
+
+// writeReleaseAsset writes a release entry for the given asset.
+// A release entry is a function which embeds and returns
+// the file's byte content.
+func writeReleaseAsset(w io.Writer, c *Config, asset *Asset) error {
+	fd, err := os.Open(asset.Path)
+	if err != nil {
+		return err
+	}
+
+	defer fd.Close()
+
+	if c.NoCompress {
+		if c.NoMemCopy {
+			err = uncompressed_nomemcopy(w, asset, fd)
+		} else {
+			err = uncompressed_memcopy(w, asset, fd)
+		}
+	} else {
+		if c.NoMemCopy {
+			err = compressed_nomemcopy(w, asset, fd)
+		} else {
+			err = compressed_memcopy(w, asset, fd)
+		}
+	}
+	if err != nil {
+		return err
+	}
+	return asset_release_common(w, c, asset)
+}
+
+// sanitize prepares a valid UTF-8 string as a raw string constant.
+// Based on https://code.google.com/p/go/source/browse/godoc/static/makestatic.go?repo=tools +func sanitize(b []byte) []byte { + // Replace ` with `+"`"+` + b = bytes.Replace(b, []byte("`"), []byte("`+\"`\"+`"), -1) + + // Replace BOM with `+"\xEF\xBB\xBF"+` + // (A BOM is valid UTF-8 but not permitted in Go source files. + // I wouldn't bother handling this, but for some insane reason + // jquery.js has a BOM somewhere in the middle.) + return bytes.Replace(b, []byte("\xEF\xBB\xBF"), []byte("`+\"\\xEF\\xBB\\xBF\"+`"), -1) +} + +func header_compressed_nomemcopy(w io.Writer, c *Config) error { + var header string + + if c.HttpFileSystem { + header = `import ( + "bytes" + "compress/gzip" + "fmt" + "net/http" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time"` + } else { + header = `import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time"` + } + + _, err := fmt.Fprintf(w, `%s +) + +func bindataRead(data, name string) ([]byte, error) { + gz, err := gzip.NewReader(strings.NewReader(data)) + if err != nil { + return nil, fmt.Errorf("read %%q: %%v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %%q: %%v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +`, header) + return err +} + +func header_compressed_memcopy(w io.Writer, c *Config) error { + var header string + + if c.HttpFileSystem { + header = `import ( + "bytes" + "compress/gzip" + "fmt" + "net/http" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time"` + } else { + header = `import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time"` + } + + _, err := fmt.Fprintf(w, `%s +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("read %%q: %%v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("read %%q: %%v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +`, header) + return err +} + +func header_uncompressed_nomemcopy(w io.Writer, c *Config) error { + var header string + + if c.HttpFileSystem { + header = `import ( + "bytes" + "fmt" + "net/http" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "time" + "unsafe"` + } else { + header = `import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "strings" + "time" + "unsafe"` + } + + _, err := fmt.Fprintf(w, `%s +) + +func bindataRead(data, name string) ([]byte, error) { + var empty [0]byte + sx := (*reflect.StringHeader)(unsafe.Pointer(&data)) + b := empty[:] + bx := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bx.Data = sx.Data + bx.Len = len(data) + bx.Cap = bx.Len + return b, nil +} + +`, header) + return err +} + +func header_uncompressed_memcopy(w io.Writer, c *Config) error { + var header string + + if c.HttpFileSystem { + header = `import ( + "bytes" + "fmt" + "net/http" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time"` + } else { + header = `import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time"` + } + + _, err := fmt.Fprintf(w, `%s +) +`, header) + return err +} + +func header_release_common(w io.Writer) error { + _, err := fmt.Fprintf(w, `type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo 
struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +// Name returns the file name +func (fi bindataFileInfo) Name() string { + return fi.name +} + +// Size returns the file size +func (fi bindataFileInfo) Size() int64 { + return fi.size +} + +// Mode returns the file mode +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} + +// ModTime returns the file modification time +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} + +// IsDir reports whether the file is a directory +func (fi bindataFileInfo) IsDir() bool { + return fi.mode&os.ModeDir != 0 +} + +// Sys returns nil; there is no underlying data source +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +`) + return err +} + +func compressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error { + _, err := fmt.Fprintf(w, `var _%s = "`, asset.Func) + if err != nil { + return err + } + + gz := gzip.NewWriter(&StringWriter{Writer: w}) + _, err = io.Copy(gz, r) + gz.Close() + + if err != nil { + return err + } + + _, err = fmt.Fprintf(w, `" + +func %sBytes() ([]byte, error) { + return bindataRead( + _%s, + %q, + ) +} + +`, asset.Func, asset.Func, asset.Name) + return err +} + +func compressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error { + _, err := fmt.Fprintf(w, `var _%s = []byte("`, asset.Func) + if err != nil { + return err + } + + gz := gzip.NewWriter(&StringWriter{Writer: w}) + _, err = io.Copy(gz, r) + gz.Close() + + if err != nil { + return err + } + + _, err = fmt.Fprintf(w, `") + +func %sBytes() ([]byte, error) { + return bindataRead( + _%s, + %q, + ) +} + +`, asset.Func, asset.Func, asset.Name) + return err +} + +func uncompressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error { + _, err := fmt.Fprintf(w, `var _%s = "`, asset.Func) + if err != nil { + return err + } + + _, err = io.Copy(&StringWriter{Writer: w}, r) + if err != nil { + return err + } + + _, err = fmt.Fprintf(w, `" + +func %sBytes() ([]byte, error) { + return bindataRead( + _%s, + %q, + ) +} + +`, asset.Func, asset.Func, asset.Name) + return err +} + +func uncompressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error { + _, err := fmt.Fprintf(w, `var _%s = []byte(`, asset.Func) + if err != nil { + return err + } + + b, err := ioutil.ReadAll(r) + if err != nil { + return err + } + if utf8.Valid(b) && !bytes.Contains(b, []byte{0}) { + fmt.Fprintf(w, "`%s`", sanitize(b)) + } else { + fmt.Fprintf(w, "%+q", b) + } + + _, err = fmt.Fprintf(w, `) + +func %sBytes() ([]byte, error) { + return _%s, nil +} + +`, asset.Func, asset.Func) + return err +} + +func asset_release_common(w io.Writer, c *Config, asset *Asset) error { + fi, err := os.Stat(asset.Path) + if err != nil { + return err + } + + mode := uint(fi.Mode()) + modTime := fi.ModTime().Unix() + size := fi.Size() + if c.NoMetadata { + mode = 0 + modTime = 0 + size = 0 + } + if c.Mode > 0 { + mode = uint(os.ModePerm) & c.Mode + } + if c.ModTime > 0 { + modTime = c.ModTime + } + _, err = fmt.Fprintf(w, `func %s() (*asset, error) { + bytes, err := %sBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: %q, size: %d, mode: os.FileMode(%d), modTime: time.Unix(%d, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +`, asset.Func, asset.Func, asset.Name, size, mode, modTime) + return err +} diff --git a/vendor/github.com/go-bindata/go-bindata/v3/restore.go b/vendor/github.com/go-bindata/go-bindata/v3/restore.go new file mode 100644 index 000000000..268ec7464 --- /dev/null +++ b/vendor/github.com/go-bindata/go-bindata/v3/restore.go @@ 
-0,0 +1,62 @@ +// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication +// license. Its contents can be found at: +// http://creativecommons.org/publicdomain/zero/1.0/ + +package bindata + +import ( + "fmt" + "io" +) + +func writeRestore(w io.Writer) error { + _, err := fmt.Fprintf(w, ` +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} +`) + return err +} diff --git a/vendor/github.com/go-bindata/go-bindata/v3/stringwriter.go b/vendor/github.com/go-bindata/go-bindata/v3/stringwriter.go new file mode 100644 index 000000000..77daa04cb --- /dev/null +++ b/vendor/github.com/go-bindata/go-bindata/v3/stringwriter.go @@ -0,0 +1,36 @@ +// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication +// license. Its contents can be found at: +// http://creativecommons.org/publicdomain/zero/1.0/ + +package bindata + +import ( + "io" +) + +const lowerHex = "0123456789abcdef" + +type StringWriter struct { + io.Writer + c int +} + +func (w *StringWriter) Write(p []byte) (n int, err error) { + if len(p) == 0 { + return + } + + buf := []byte(`\x00`) + var b byte + + for n, b = range p { + buf[2] = lowerHex[b/16] + buf[3] = lowerHex[b%16] + w.Writer.Write(buf) + w.c++ + } + + n++ + + return +} diff --git a/vendor/github.com/go-bindata/go-bindata/v3/toc.go b/vendor/github.com/go-bindata/go-bindata/v3/toc.go new file mode 100644 index 000000000..be4a01654 --- /dev/null +++ b/vendor/github.com/go-bindata/go-bindata/v3/toc.go @@ -0,0 +1,288 @@ +// This work is subject to the CC0 1.0 Universal (CC0 1.0) Public Domain Dedication +// license. 
Its contents can be found at: +// http://creativecommons.org/publicdomain/zero/1.0/ + +package bindata + +import ( + "fmt" + "io" + "sort" + "strings" +) + +type assetTree struct { + Asset Asset + Children map[string]*assetTree +} + +func newAssetTree() *assetTree { + tree := &assetTree{} + tree.Children = make(map[string]*assetTree) + return tree +} + +func (node *assetTree) child(name string) *assetTree { + rv, ok := node.Children[name] + if !ok { + rv = newAssetTree() + node.Children[name] = rv + } + return rv +} + +func (root *assetTree) Add(route []string, asset Asset) { + for _, name := range route { + root = root.child(name) + } + root.Asset = asset +} + +func ident(w io.Writer, n int) { + for i := 0; i < n; i++ { + w.Write([]byte{'\t'}) + } +} + +func (root *assetTree) funcOrNil() string { + if root.Asset.Func == "" { + return "nil" + } else { + return root.Asset.Func + } +} + +func getFillerSize(tokenIndex int, lengths []int, nident int) int { + var ( + curlen int = lengths[tokenIndex] + maxlen int = 0 + substart int = 0 + subend int = 0 + spacediff int = 0 + ) + + if curlen > 0 { + substart = tokenIndex + for (substart-1) >= 0 && lengths[substart-1] > 0 { + substart -= 1 + } + + subend = tokenIndex + for (subend+1) < len(lengths) && lengths[subend+1] > 0 { + subend += 1 + } + + var candidate int + for j := substart; j <= subend; j += 1 { + candidate = lengths[j] + if candidate > maxlen { + maxlen = candidate + } + } + + spacediff = maxlen - curlen + } + + return spacediff +} + +func (root *assetTree) writeGoMap(w io.Writer, nident int) { + fmt.Fprintf(w, "&bintree{%s, map[string]*bintree{", root.funcOrNil()) + + if len(root.Children) > 0 { + io.WriteString(w, "\n") + + // Sort to make output stable between invocations + filenames := make([]string, len(root.Children)) + hasChildren := make(map[string]bool) + i := 0 + for filename, node := range root.Children { + filenames[i] = filename + hasChildren[filename] = len(node.Children) > 0 + i++ + } + sort.Strings(filenames) + + lengths := make([]int, len(root.Children)) + for i, filename := range filenames { + if hasChildren[filename] { + lengths[i] = 0 + } else { + lengths[i] = len(filename) + } + } + + for i, p := range filenames { + ident(w, nident+1) + filler := strings.Repeat(" ", getFillerSize(i, lengths, nident)) + fmt.Fprintf(w, `"%s": %s`, p, filler) + root.Children[p].writeGoMap(w, nident+1) + } + ident(w, nident) + } + + io.WriteString(w, "}}") + if nident > 0 { + io.WriteString(w, ",") + } + io.WriteString(w, "\n") +} + +func (root *assetTree) WriteAsGoMap(w io.Writer) error { + _, err := fmt.Fprint(w, `type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = `) + root.writeGoMap(w, 0) + return err +} + +func writeTOCTree(w io.Writer, toc []Asset) error { + _, err := fmt.Fprintf(w, `// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. 
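+// +// For reference, a consumer of the generated package would use these +// accessors roughly like so (a hypothetical sketch; the actual asset +// names depend on what was embedded): +// +// data, err := Asset("data/foo.txt") // raw bytes of one asset +// names := AssetNames() // every embedded asset name +// children, err := AssetDir("data/img") // []string{"a.png", "b.png"} +//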
+func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %%s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %%s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +`) + if err != nil { + return err + } + tree := newAssetTree() + for i := range toc { + pathList := strings.Split(toc[i].Name, "/") + tree.Add(pathList, toc[i]) + } + return tree.WriteAsGoMap(w) +} + +// writeTOC writes the table of contents file. +func writeTOC(w io.Writer, toc []Asset) error { + err := writeTOCHeader(w) + if err != nil { + return err + } + + var maxlen = 0 + for i := range toc { + l := len(toc[i].Name) + if l > maxlen { + maxlen = l + } + } + + for i := range toc { + err = writeTOCAsset(w, &toc[i], maxlen) + if err != nil { + return err + } + } + + return writeTOCFooter(w) +} + +// writeTOCHeader writes the table of contents file header. +func writeTOCHeader(w io.Writer) error { + _, err := fmt.Fprintf(w, `// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %%s can't read by error: %%v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %%s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %%s can't read by error: %%v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %%s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ +`) + return err +} + +// writeTOCAsset write a TOC entry for the given asset. +func writeTOCAsset(w io.Writer, asset *Asset, maxlen int) error { + spacediff := maxlen - len(asset.Name) + filler := strings.Repeat(" ", spacediff) + + _, err := fmt.Fprintf(w, "\t%q: %s%s,\n", asset.Name, filler, asset.Func) + return err +} + +// writeTOCFooter writes the table of contents file footer. 
+func writeTOCFooter(w io.Writer) error { + _, err := fmt.Fprintf(w, `} + +`) + return err +} diff --git a/vendor/github.com/go-bindata/go-bindata/v3/tools.go b/vendor/github.com/go-bindata/go-bindata/v3/tools.go new file mode 100644 index 000000000..583a04643 --- /dev/null +++ b/vendor/github.com/go-bindata/go-bindata/v3/tools.go @@ -0,0 +1,8 @@ +// +build tools + +package tools + +import ( + _ "github.com/kisielk/errcheck" + _ "golang.org/x/lint/golint" +) diff --git a/vendor/github.com/gobuffalo/flect/.gitignore b/vendor/github.com/gobuffalo/flect/.gitignore new file mode 100644 index 000000000..368971859 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/.gitignore @@ -0,0 +1,29 @@ +*.log +.DS_Store +doc +tmp +pkg +*.gem +*.pid +coverage +coverage.data +build/* +*.pbxuser +*.mode1v3 +.svn +profile +.console_history +.sass-cache/* +.rake_tasks~ +*.log.lck +solr/ +.jhw-cache/ +jhw.* +*.sublime* +node_modules/ +dist/ +generated/ +.vendor/ +bin/* +gin-bin +.idea/ diff --git a/vendor/github.com/gobuffalo/flect/.gometalinter.json b/vendor/github.com/gobuffalo/flect/.gometalinter.json new file mode 100644 index 000000000..e4f65a36e --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/.gometalinter.json @@ -0,0 +1,3 @@ +{ + "Enable": ["vet", "golint", "goimports", "deadcode", "gotype", "ineffassign", "misspell", "nakedret", "unconvert", "megacheck", "varcheck"] +} diff --git a/vendor/github.com/gobuffalo/flect/LICENSE b/vendor/github.com/gobuffalo/flect/LICENSE new file mode 100644 index 000000000..649efd437 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2019 Mark Bates + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/gobuffalo/flect/Makefile b/vendor/github.com/gobuffalo/flect/Makefile new file mode 100644 index 000000000..0ac539f1c --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/Makefile @@ -0,0 +1,61 @@ +TAGS ?= "" +GO_BIN ?= "go" + +install: + $(GO_BIN) install -tags ${TAGS} -v . + make tidy + +tidy: +ifeq ($(GO111MODULE),on) + $(GO_BIN) mod tidy +else + echo skipping go mod tidy +endif + +deps: + $(GO_BIN) get -tags ${TAGS} -t ./... + make tidy + +build: + $(GO_BIN) build -v . + make tidy + +test: + $(GO_BIN) test -cover -tags ${TAGS} ./... + make tidy + +ci-deps: + $(GO_BIN) get -tags ${TAGS} -t ./... + +ci-test: + $(GO_BIN) test -tags ${TAGS} -race ./... 
+ +lint: + go get github.com/golangci/golangci-lint/cmd/golangci-lint + golangci-lint run --enable-all + make tidy + +update: +ifeq ($(GO111MODULE),on) + rm go.* + $(GO_BIN) mod init + $(GO_BIN) mod tidy +else + $(GO_BIN) get -u -tags ${TAGS} +endif + make test + make install + make tidy + +release-test: + $(GO_BIN) test -tags ${TAGS} -race ./... + make tidy + +release: + $(GO_BIN) get github.com/gobuffalo/release + make tidy + release -y -f version.go --skip-packr + make tidy + + + diff --git a/vendor/github.com/gobuffalo/flect/README.md b/vendor/github.com/gobuffalo/flect/README.md new file mode 100644 index 000000000..2d9a1bd3b --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/README.md @@ -0,0 +1,36 @@ +# Flect + +
+ +This is a new inflection engine to replace [https://github.com/markbates/inflect](https://github.com/markbates/inflect) designed to be more modular, more readable, and easier to fix issues on than the original. + +## Installation + +```bash +$ go get -u -v github.com/gobuffalo/flect +``` + +## `github.com/gobuffalo/flect` + +The `github.com/gobuffalo/flect` package contains "basic" inflection tools, like pluralization, singularization, etc... + +### The `Ident` Type + +In addition to helpful methods that take in a `string` and return a `string`, there is an `Ident` type that can be used to create new, custom, inflection rules. + +The `Ident` type contains two fields. + +* `Original` - This is the original `string` that was used to create the `Ident` +* `Parts` - This is a `[]string` that represents all of the "parts" of the string, that have been split apart, making the segments easier to work with + +Examples of creating new inflection rules using `Ident` can be found in the `github.com/gobuffalo/flect/name` package. + +## `github.com/gobuffalo/flect/name` + +The `github.com/gobuffalo/flect/name` package contains more "business" inflection rules like creating proper names, table names, etc... (see the short usage sketch below). diff --git a/vendor/github.com/gobuffalo/flect/SHOULDERS.md b/vendor/github.com/gobuffalo/flect/SHOULDERS.md new file mode 100644 index 000000000..8c359f157 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/SHOULDERS.md @@ -0,0 +1,10 @@ +# github.com/gobuffalo/flect Stands on the Shoulders of Giants + +github.com/gobuffalo/flect does not try to reinvent the wheel! Instead, it uses the already great wheels developed by the Go community and puts them all together in the best way possible. Without these giants, this project would not be possible. Please make sure to check them out and thank them for all of their hard work.
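To make the README's `Ident` description concrete, here is a short, hypothetical sketch of the basic flect API vendored above (the inputs are arbitrary examples; the expected output, per the vendored sources, is noted in the comments):

```go
package main

import (
	"fmt"

	"github.com/gobuffalo/flect"
)

func main() {
	// An Ident carries the original string plus its split-up parts;
	// "id" is normalized to "ID" via the built-in acronym table.
	i := flect.New("widget_id")
	fmt.Println(i.Original) // widget_id
	fmt.Println(i.Parts)    // [widget ID]

	// The package-level helpers wrap the same Ident machinery.
	fmt.Println(flect.Camelize("widget_id"))  // widgetID
	fmt.Println(flect.Pascalize("widget_id")) // WidgetID
	fmt.Println(flect.Pluralize("person"))    // people
	fmt.Println(flect.Singularize("data"))    // datum
}
```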
+ +Thank you to the following **GIANTS**: + + +* [github.com/davecgh/go-spew](https://godoc.org/github.com/davecgh/go-spew) + +* [github.com/stretchr/testify](https://godoc.org/github.com/stretchr/testify) diff --git a/vendor/github.com/gobuffalo/flect/acronyms.go b/vendor/github.com/gobuffalo/flect/acronyms.go new file mode 100644 index 000000000..b169724a4 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/acronyms.go @@ -0,0 +1,152 @@ +package flect + +import "sync" + +var acronymsMoot = &sync.RWMutex{} + +var baseAcronyms = map[string]bool{ + "OK": true, + "UTF8": true, + "HTML": true, + "JSON": true, + "JWT": true, + "ID": true, + "UUID": true, + "SQL": true, + "ACK": true, + "ACL": true, + "ADSL": true, + "AES": true, + "ANSI": true, + "API": true, + "ARP": true, + "ATM": true, + "BGP": true, + "BSS": true, + "CCITT": true, + "CHAP": true, + "CIDR": true, + "CIR": true, + "CLI": true, + "CPE": true, + "CPU": true, + "CRC": true, + "CRT": true, + "CSMA": true, + "CMOS": true, + "DCE": true, + "DEC": true, + "DES": true, + "DHCP": true, + "DNS": true, + "DRAM": true, + "DSL": true, + "DSLAM": true, + "DTE": true, + "DMI": true, + "EHA": true, + "EIA": true, + "EIGRP": true, + "EOF": true, + "ESS": true, + "FCC": true, + "FCS": true, + "FDDI": true, + "FTP": true, + "GBIC": true, + "gbps": true, + "GEPOF": true, + "HDLC": true, + "HTTP": true, + "HTTPS": true, + "IANA": true, + "ICMP": true, + "IDF": true, + "IDS": true, + "IEEE": true, + "IETF": true, + "IMAP": true, + "IP": true, + "IPS": true, + "ISDN": true, + "ISP": true, + "kbps": true, + "LACP": true, + "LAN": true, + "LAPB": true, + "LAPF": true, + "LLC": true, + "MAC": true, + "Mbps": true, + "MC": true, + "MDF": true, + "MIB": true, + "MoCA": true, + "MPLS": true, + "MTU": true, + "NAC": true, + "NAT": true, + "NBMA": true, + "NIC": true, + "NRZ": true, + "NRZI": true, + "NVRAM": true, + "OSI": true, + "OSPF": true, + "OUI": true, + "PAP": true, + "PAT": true, + "PC": true, + "PIM": true, + "PCM": true, + "PDU": true, + "POP3": true, + "POTS": true, + "PPP": true, + "PPTP": true, + "PTT": true, + "PVST": true, + "RAM": true, + "RARP": true, + "RFC": true, + "RIP": true, + "RLL": true, + "ROM": true, + "RSTP": true, + "RTP": true, + "RCP": true, + "SDLC": true, + "SFD": true, + "SFP": true, + "SLARP": true, + "SLIP": true, + "SMTP": true, + "SNA": true, + "SNAP": true, + "SNMP": true, + "SOF": true, + "SRAM": true, + "SSH": true, + "SSID": true, + "STP": true, + "SYN": true, + "TDM": true, + "TFTP": true, + "TIA": true, + "TOFU": true, + "UDP": true, + "URL": true, + "URI": true, + "USB": true, + "UTP": true, + "VC": true, + "VLAN": true, + "VLSM": true, + "VPN": true, + "W3C": true, + "WAN": true, + "WEP": true, + "WiFi": true, + "WPA": true, + "WWW": true, +} diff --git a/vendor/github.com/gobuffalo/flect/azure-pipelines.yml b/vendor/github.com/gobuffalo/flect/azure-pipelines.yml new file mode 100644 index 000000000..417e2c579 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/azure-pipelines.yml @@ -0,0 +1,71 @@ +variables: + GOBIN: "$(GOPATH)/bin" # Go binaries path + GOPATH: "$(system.defaultWorkingDirectory)/gopath" # Go workspace path + modulePath: "$(GOPATH)/src/github.com/$(build.repository.name)" # Path to the module"s code + +jobs: +- job: Windows + pool: + vmImage: "vs2017-win2016" + strategy: + matrix: + go 1.10: + go_version: "1.10" + go 1.11 (on): + go_version: "1.11.5" + GO111MODULE: "on" + go 1.11 (off): + go_version: "1.11.5" + GO111MODULE: "off" + go 1.12 (on): + go_version: "1.12" + GO111MODULE: "on" + 
go 1.12 (off): + go_version: "1.12" + GO111MODULE: "off" + steps: + - template: azure-tests.yml + +- job: macOS + pool: + vmImage: "macOS-10.13" + strategy: + matrix: + go 1.10: + go_version: "1.10" + go 1.11 (on): + go_version: "1.11.5" + GO111MODULE: "on" + go 1.11 (off): + go_version: "1.11.5" + GO111MODULE: "off" + go 1.12 (on): + go_version: "1.12" + GO111MODULE: "on" + go 1.12 (off): + go_version: "1.12" + GO111MODULE: "off" + steps: + - template: azure-tests.yml + +- job: Linux + pool: + vmImage: "ubuntu-16.04" + strategy: + matrix: + go 1.10: + go_version: "1.10" + go 1.11 (on): + go_version: "1.11.5" + GO111MODULE: "on" + go 1.11 (off): + go_version: "1.11.5" + GO111MODULE: "off" + go 1.12 (on): + go_version: "1.12" + GO111MODULE: "on" + go 1.12 (off): + go_version: "1.12" + GO111MODULE: "off" + steps: + - template: azure-tests.yml diff --git a/vendor/github.com/gobuffalo/flect/azure-tests.yml b/vendor/github.com/gobuffalo/flect/azure-tests.yml new file mode 100644 index 000000000..eea5822fa --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/azure-tests.yml @@ -0,0 +1,19 @@ +steps: + - task: GoTool@0 + inputs: + version: $(go_version) + - task: Bash@3 + inputs: + targetType: inline + script: | + mkdir -p "$(GOBIN)" + mkdir -p "$(GOPATH)/pkg" + mkdir -p "$(modulePath)" + shopt -s extglob + mv !(gopath) "$(modulePath)" + displayName: "Setup Go Workspace" + - script: | + go get -t -v ./... + go test -race ./... + workingDirectory: "$(modulePath)" + displayName: "Tests" diff --git a/vendor/github.com/gobuffalo/flect/camelize.go b/vendor/github.com/gobuffalo/flect/camelize.go new file mode 100644 index 000000000..8a9928e8b --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/camelize.go @@ -0,0 +1,48 @@ +package flect + +import ( + "strings" + "unicode" +) + +// Camelize returns a camelize version of a string +// bob dylan = bobDylan +// widget_id = widgetID +// WidgetID = widgetID +func Camelize(s string) string { + return New(s).Camelize().String() +} + +// Camelize returns a camelize version of a string +// bob dylan = bobDylan +// widget_id = widgetID +// WidgetID = widgetID +func (i Ident) Camelize() Ident { + var out []string + for i, part := range i.Parts { + var x string + var capped bool + if strings.ToLower(part) == "id" { + out = append(out, "ID") + continue + } + for _, c := range part { + if unicode.IsLetter(c) || unicode.IsDigit(c) { + if i == 0 { + x += string(unicode.ToLower(c)) + continue + } + if !capped { + capped = true + x += string(unicode.ToUpper(c)) + continue + } + x += string(c) + } + } + if x != "" { + out = append(out, x) + } + } + return New(strings.Join(out, "")) +} diff --git a/vendor/github.com/gobuffalo/flect/capitalize.go b/vendor/github.com/gobuffalo/flect/capitalize.go new file mode 100644 index 000000000..42ecc166c --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/capitalize.go @@ -0,0 +1,27 @@ +package flect + +import "unicode" + +// Capitalize will cap the first letter of string +// user = User +// bob dylan = Bob dylan +// widget_id = Widget_id +func Capitalize(s string) string { + return New(s).Capitalize().String() +} + +// Capitalize will cap the first letter of string +// user = User +// bob dylan = Bob dylan +// widget_id = Widget_id +func (i Ident) Capitalize() Ident { + var x string + if len(i.Parts) == 0 { + return New("") + } + x = string(unicode.ToTitle(rune(i.Original[0]))) + if len(i.Original) > 1 { + x += i.Original[1:] + } + return New(x) +} diff --git a/vendor/github.com/gobuffalo/flect/custom_data.go 
b/vendor/github.com/gobuffalo/flect/custom_data.go new file mode 100644 index 000000000..9a2dfc74a --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/custom_data.go @@ -0,0 +1,83 @@ +package flect + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" +) + +func init() { + loadCustomData("inflections.json", "INFLECT_PATH", "could not read inflection file", LoadInflections) + loadCustomData("acronyms.json", "ACRONYMS_PATH", "could not read acronyms file", LoadAcronyms) +} + +//CustomDataParser are functions that parse data like acronyms or +//plurals in the shape of a io.Reader it receives. +type CustomDataParser func(io.Reader) error + +func loadCustomData(defaultFile, env, readErrorMessage string, parser CustomDataParser) { + pwd, _ := os.Getwd() + path, found := os.LookupEnv(env) + if !found { + path = filepath.Join(pwd, defaultFile) + } + + if _, err := os.Stat(path); err != nil { + return + } + + b, err := ioutil.ReadFile(path) + if err != nil { + fmt.Printf("%s %s (%s)\n", readErrorMessage, path, err) + return + } + + if err = parser(bytes.NewReader(b)); err != nil { + fmt.Println(err) + } +} + +//LoadAcronyms loads rules from io.Reader param +func LoadAcronyms(r io.Reader) error { + m := []string{} + err := json.NewDecoder(r).Decode(&m) + + if err != nil { + return fmt.Errorf("could not decode acronyms JSON from reader: %s", err) + } + + acronymsMoot.Lock() + defer acronymsMoot.Unlock() + + for _, acronym := range m { + baseAcronyms[acronym] = true + } + + return nil +} + +//LoadInflections loads rules from io.Reader param +func LoadInflections(r io.Reader) error { + m := map[string]string{} + + err := json.NewDecoder(r).Decode(&m) + if err != nil { + return fmt.Errorf("could not decode inflection JSON from reader: %s", err) + } + + pluralMoot.Lock() + defer pluralMoot.Unlock() + singularMoot.Lock() + defer singularMoot.Unlock() + + for s, p := range m { + singleToPlural[s] = p + pluralToSingle[p] = s + } + + return nil +} diff --git a/vendor/github.com/gobuffalo/flect/dasherize.go b/vendor/github.com/gobuffalo/flect/dasherize.go new file mode 100644 index 000000000..c7a8a33e3 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/dasherize.go @@ -0,0 +1,34 @@ +package flect + +import ( + "strings" + "unicode" +) + +// Dasherize returns an alphanumeric, lowercased, dashed string +// Donald E. Knuth = donald-e-knuth +// Test with + sign = test-with-sign +// admin/WidgetID = admin-widget-id +func Dasherize(s string) string { + return New(s).Dasherize().String() +} + +// Dasherize returns an alphanumeric, lowercased, dashed string +// Donald E. Knuth = donald-e-knuth +// Test with + sign = test-with-sign +// admin/WidgetID = admin-widget-id +func (i Ident) Dasherize() Ident { + var parts []string + + for _, part := range i.Parts { + var x string + for _, c := range part { + if unicode.IsLetter(c) || unicode.IsDigit(c) { + x += string(c) + } + } + parts = xappend(parts, x) + } + + return New(strings.ToLower(strings.Join(parts, "-"))) +} diff --git a/vendor/github.com/gobuffalo/flect/flect.go b/vendor/github.com/gobuffalo/flect/flect.go new file mode 100644 index 000000000..ee81b6f2b --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/flect.go @@ -0,0 +1,43 @@ +/* +Package flect is a new inflection engine to replace [https://github.com/markbates/inflect](https://github.com/markbates/inflect) designed to be more modular, more readable, and easier to fix issues on than the original. 
+*/ +package flect + +import ( + "strings" + "unicode" +) + +var spaces = []rune{'_', ' ', ':', '-', '/'} + +func isSpace(c rune) bool { + for _, r := range spaces { + if r == c { + return true + } + } + return unicode.IsSpace(c) +} + +func xappend(a []string, ss ...string) []string { + for _, s := range ss { + s = strings.TrimSpace(s) + for _, x := range spaces { + s = strings.Trim(s, string(x)) + } + if _, ok := baseAcronyms[strings.ToUpper(s)]; ok { + s = strings.ToUpper(s) + } + if s != "" { + a = append(a, s) + } + } + return a +} + +func abs(x int) int { + if x < 0 { + return -x + } + return x +} diff --git a/vendor/github.com/gobuffalo/flect/go.mod b/vendor/github.com/gobuffalo/flect/go.mod new file mode 100644 index 000000000..cd02d074b --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/go.mod @@ -0,0 +1,8 @@ +module github.com/gobuffalo/flect + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/gobuffalo/flect/go.sum b/vendor/github.com/gobuffalo/flect/go.sum new file mode 100644 index 000000000..4f76e62c1 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/go.sum @@ -0,0 +1,9 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/gobuffalo/flect/humanize.go b/vendor/github.com/gobuffalo/flect/humanize.go new file mode 100644 index 000000000..ded093970 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/humanize.go @@ -0,0 +1,35 @@ +package flect + +import ( + "strings" +) + +// Humanize returns first letter of sentence capitalized. +// Common acronyms are capitalized as well. +// Other capital letters in string are left as provided. 
+// employee_salary = Employee salary +// employee_id = employee ID +// employee_mobile_number = Employee mobile number +// first_Name = First Name +// firstName = First Name +func Humanize(s string) string { + return New(s).Humanize().String() +} + +// Humanize First letter of sentence capitalized +func (i Ident) Humanize() Ident { + if len(i.Original) == 0 { + return New("") + } + + var parts []string + for index, part := range i.Parts { + if index == 0 { + part = strings.Title(i.Parts[0]) + } + + parts = xappend(parts, part) + } + + return New(strings.Join(parts, " ")) +} diff --git a/vendor/github.com/gobuffalo/flect/ident.go b/vendor/github.com/gobuffalo/flect/ident.go new file mode 100644 index 000000000..78b51d457 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/ident.go @@ -0,0 +1,106 @@ +package flect + +import ( + "encoding" + "strings" + "unicode" + "unicode/utf8" +) + +// Ident represents the string and it's parts +type Ident struct { + Original string + Parts []string +} + +// String implements fmt.Stringer and returns the original string +func (i Ident) String() string { + return i.Original +} + +// New creates a new Ident from the string +func New(s string) Ident { + i := Ident{ + Original: s, + Parts: toParts(s), + } + + return i +} + +func toParts(s string) []string { + parts := []string{} + s = strings.TrimSpace(s) + if len(s) == 0 { + return parts + } + if _, ok := baseAcronyms[strings.ToUpper(s)]; ok { + return []string{strings.ToUpper(s)} + } + var prev rune + var x string + for _, c := range s { + cs := string(c) + // fmt.Println("### cs ->", cs) + // fmt.Println("### unicode.IsControl(c) ->", unicode.IsControl(c)) + // fmt.Println("### unicode.IsDigit(c) ->", unicode.IsDigit(c)) + // fmt.Println("### unicode.IsGraphic(c) ->", unicode.IsGraphic(c)) + // fmt.Println("### unicode.IsLetter(c) ->", unicode.IsLetter(c)) + // fmt.Println("### unicode.IsLower(c) ->", unicode.IsLower(c)) + // fmt.Println("### unicode.IsMark(c) ->", unicode.IsMark(c)) + // fmt.Println("### unicode.IsPrint(c) ->", unicode.IsPrint(c)) + // fmt.Println("### unicode.IsPunct(c) ->", unicode.IsPunct(c)) + // fmt.Println("### unicode.IsSpace(c) ->", unicode.IsSpace(c)) + // fmt.Println("### unicode.IsTitle(c) ->", unicode.IsTitle(c)) + // fmt.Println("### unicode.IsUpper(c) ->", unicode.IsUpper(c)) + if !utf8.ValidRune(c) { + continue + } + + if isSpace(c) { + parts = xappend(parts, x) + x = cs + prev = c + continue + } + + if unicode.IsUpper(c) && !unicode.IsUpper(prev) { + parts = xappend(parts, x) + x = cs + prev = c + continue + } + if unicode.IsUpper(c) && baseAcronyms[strings.ToUpper(x)] { + parts = xappend(parts, x) + x = cs + prev = c + continue + } + if unicode.IsLetter(c) || unicode.IsDigit(c) || unicode.IsPunct(c) || c == '`' { + prev = c + x += cs + continue + } + + parts = xappend(parts, x) + x = "" + prev = c + } + parts = xappend(parts, x) + + return parts +} + +var _ encoding.TextUnmarshaler = &Ident{} +var _ encoding.TextMarshaler = &Ident{} + +//UnmarshalText unmarshalls byte array into the Ident +func (i *Ident) UnmarshalText(data []byte) error { + (*i) = New(string(data)) + return nil +} + +//MarshalText marshals Ident into byte array +func (i Ident) MarshalText() ([]byte, error) { + return []byte(i.Original), nil +} diff --git a/vendor/github.com/gobuffalo/flect/lower_upper.go b/vendor/github.com/gobuffalo/flect/lower_upper.go new file mode 100644 index 000000000..930da58d8 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/lower_upper.go @@ -0,0 +1,13 @@ +package flect + 
+import "strings" + +// ToUpper is a convience wrapper for strings.ToUpper +func (i Ident) ToUpper() Ident { + return New(strings.ToUpper(i.Original)) +} + +// ToLower is a convience wrapper for strings.ToLower +func (i Ident) ToLower() Ident { + return New(strings.ToLower(i.Original)) +} diff --git a/vendor/github.com/gobuffalo/flect/ordinalize.go b/vendor/github.com/gobuffalo/flect/ordinalize.go new file mode 100644 index 000000000..1ce27b3a0 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/ordinalize.go @@ -0,0 +1,43 @@ +package flect + +import ( + "fmt" + "strconv" +) + +// Ordinalize converts a number to an ordinal version +// 42 = 42nd +// 45 = 45th +// 1 = 1st +func Ordinalize(s string) string { + return New(s).Ordinalize().String() +} + +// Ordinalize converts a number to an ordinal version +// 42 = 42nd +// 45 = 45th +// 1 = 1st +func (i Ident) Ordinalize() Ident { + number, err := strconv.Atoi(i.Original) + if err != nil { + return i + } + var s string + switch abs(number) % 100 { + case 11, 12, 13: + s = fmt.Sprintf("%dth", number) + default: + switch abs(number) % 10 { + case 1: + s = fmt.Sprintf("%dst", number) + case 2: + s = fmt.Sprintf("%dnd", number) + case 3: + s = fmt.Sprintf("%drd", number) + } + } + if s != "" { + return New(s) + } + return New(fmt.Sprintf("%dth", number)) +} diff --git a/vendor/github.com/gobuffalo/flect/pascalize.go b/vendor/github.com/gobuffalo/flect/pascalize.go new file mode 100644 index 000000000..76f0c6a70 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/pascalize.go @@ -0,0 +1,25 @@ +package flect + +import ( + "unicode" +) + +// Pascalize returns a string with each segment capitalized +// user = User +// bob dylan = BobDylan +// widget_id = WidgetID +func Pascalize(s string) string { + return New(s).Pascalize().String() +} + +// Pascalize returns a string with each segment capitalized +// user = User +// bob dylan = BobDylan +// widget_id = WidgetID +func (i Ident) Pascalize() Ident { + c := i.Camelize() + if len(c.String()) == 0 { + return c + } + return New(string(unicode.ToUpper(rune(c.Original[0]))) + c.Original[1:]) +} diff --git a/vendor/github.com/gobuffalo/flect/plural_rules.go b/vendor/github.com/gobuffalo/flect/plural_rules.go new file mode 100644 index 000000000..86fca8c5f --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/plural_rules.go @@ -0,0 +1,284 @@ +package flect + +var pluralRules = []rule{} + +// AddPlural adds a rule that will replace the given suffix with the replacement suffix. 
+func AddPlural(suffix string, repl string) { + pluralMoot.Lock() + defer pluralMoot.Unlock() + pluralRules = append(pluralRules, rule{ + suffix: suffix, + fn: func(s string) string { + s = s[:len(s)-len(suffix)] + return s + repl + }, + }) + + pluralRules = append(pluralRules, rule{ + suffix: repl, + fn: noop, + }) +} + +var singleToPlural = map[string]string{ + "aircraft": "aircraft", + "alias": "aliases", + "alumna": "alumnae", + "alumnus": "alumni", + "analysis": "analyses", + "antenna": "antennas", + "antithesis": "antitheses", + "apex": "apexes", + "appendix": "appendices", + "axis": "axes", + "bacillus": "bacilli", + "bacterium": "bacteria", + "basis": "bases", + "beau": "beaus", + "bison": "bison", + "bureau": "bureaus", + "bus": "buses", + "campus": "campuses", + "caucus": "caucuses", + "château": "châteaux", + "circus": "circuses", + "codex": "codices", + "concerto": "concertos", + "corpus": "corpora", + "crisis": "crises", + "curriculum": "curriculums", + "datum": "data", + "dear": "dear", + "deer": "deer", + "diagnosis": "diagnoses", + "die": "dice", + "dwarf": "dwarves", + "ellipsis": "ellipses", + "equipment": "equipment", + "erratum": "errata", + "faux pas": "faux pas", + "fez": "fezzes", + "fish": "fish", + "focus": "foci", + "foo": "foos", + "foot": "feet", + "formula": "formulas", + "fungus": "fungi", + "genus": "genera", + "goose": "geese", + "graffito": "graffiti", + "grouse": "grouse", + "half": "halves", + "halo": "halos", + "hoof": "hooves", + "human": "humans", + "hypothesis": "hypotheses", + "index": "indices", + "information": "information", + "jeans": "jeans", + "larva": "larvae", + "libretto": "librettos", + "loaf": "loaves", + "locus": "loci", + "louse": "lice", + "matrix": "matrices", + "minutia": "minutiae", + "money": "money", + "moose": "moose", + "mouse": "mice", + "nebula": "nebulae", + "news": "news", + "nucleus": "nuclei", + "oasis": "oases", + "octopus": "octopi", + "offspring": "offspring", + "opus": "opera", + "ovum": "ova", + "ox": "oxen", + "parenthesis": "parentheses", + "phenomenon": "phenomena", + "photo": "photos", + "phylum": "phyla", + "piano": "pianos", + "plus": "pluses", + "police": "police", + "prognosis": "prognoses", + "prometheus": "prometheuses", + "quiz": "quizzes", + "radius": "radiuses", + "referendum": "referendums", + "ress": "resses", + "rice": "rice", + "salmon": "salmon", + "series": "series", + "sheep": "sheep", + "shoe": "shoes", + "shrimp": "shrimp", + "species": "species", + "stimulus": "stimuli", + "stratum": "strata", + "swine": "swine", + "syllabus": "syllabi", + "symposium": "symposiums", + "synopsis": "synopses", + "tableau": "tableaus", + "testis": "testes", + "thesis": "theses", + "thief": "thieves", + "tooth": "teeth", + "trout": "trout", + "tuna": "tuna", + "vertebra": "vertebrae", + "vertix": "vertices", + "vita": "vitae", + "vortex": "vortices", + "wharf": "wharves", + "wife": "wives", + "wolf": "wolves", + "you": "you", +} + +var pluralToSingle = map[string]string{} + +func init() { + for k, v := range singleToPlural { + pluralToSingle[v] = k + } +} + +type singularToPluralSuffix struct { + singular string + plural string +} + +var singularToPluralSuffixList = []singularToPluralSuffix{ + {"iterion", "iteria"}, + {"campus", "campuses"}, + {"genera", "genus"}, + {"person", "people"}, + {"phylum", "phyla"}, + {"randum", "randa"}, + {"actus", "acti"}, + {"adium", "adia"}, + {"alias", "aliases"}, + {"basis", "basis"}, + {"child", "children"}, + {"chive", "chives"}, + {"focus", "foci"}, + {"hello", "hellos"}, + 
{"jeans", "jeans"}, + {"louse", "lice"}, + {"mouse", "mice"}, + {"movie", "movies"}, + {"oasis", "oasis"}, + {"atum", "ata"}, + {"atus", "atuses"}, + {"base", "bases"}, + {"cess", "cesses"}, + {"dium", "diums"}, + {"eses", "esis"}, + {"half", "halves"}, + {"hive", "hives"}, + {"iano", "ianos"}, + {"irus", "iri"}, + {"isis", "ises"}, + {"leus", "li"}, + {"mnus", "mni"}, + {"move", "moves"}, + {"news", "news"}, + {"odex", "odice"}, + {"oose", "eese"}, + {"ouse", "ouses"}, + {"ovum", "ova"}, + {"rion", "ria"}, + {"shoe", "shoes"}, + {"stis", "stes"}, + {"tive", "tives"}, + {"wife", "wives"}, + {"afe", "aves"}, + {"bfe", "bves"}, + {"box", "boxes"}, + {"cfe", "cves"}, + {"dfe", "dves"}, + {"dge", "dges"}, + {"efe", "eves"}, + {"gfe", "gves"}, + {"hfe", "hves"}, + {"ife", "ives"}, + {"itz", "itzes"}, + {"ium", "ia"}, + {"ize", "izes"}, + {"jfe", "jves"}, + {"kfe", "kves"}, + {"man", "men"}, + {"mfe", "mves"}, + {"nfe", "nves"}, + {"nna", "nnas"}, + {"oaf", "oaves"}, + {"oci", "ocus"}, + {"ode", "odes"}, + {"ofe", "oves"}, + {"oot", "eet"}, + {"pfe", "pves"}, + {"pse", "psis"}, + {"qfe", "qves"}, + {"quy", "quies"}, + {"rfe", "rves"}, + {"sfe", "sves"}, + {"tfe", "tves"}, + {"tum", "ta"}, + {"tus", "tuses"}, + {"ufe", "uves"}, + {"ula", "ulae"}, + {"ula", "ulas"}, + {"uli", "ulus"}, + {"use", "uses"}, + {"uss", "usses"}, + {"vfe", "vves"}, + {"wfe", "wves"}, + {"xfe", "xves"}, + {"yfe", "yves"}, + {"you", "you"}, + {"zfe", "zves"}, + {"by", "bies"}, + {"ch", "ches"}, + {"cy", "cies"}, + {"dy", "dies"}, + {"ex", "ices"}, + {"fy", "fies"}, + {"gy", "gies"}, + {"hy", "hies"}, + {"io", "ios"}, + {"jy", "jies"}, + {"ky", "kies"}, + {"ld", "ldren"}, + {"lf", "lves"}, + {"ly", "lies"}, + {"my", "mies"}, + {"ny", "nies"}, + {"ox", "oxen"}, + {"py", "pies"}, + {"qy", "qies"}, + {"rf", "rves"}, + {"ry", "ries"}, + {"sh", "shes"}, + {"ss", "sses"}, + {"sy", "sies"}, + {"ty", "ties"}, + {"tz", "tzes"}, + {"va", "vae"}, + {"vy", "vies"}, + {"wy", "wies"}, + {"xy", "xies"}, + {"zy", "zies"}, + {"zz", "zzes"}, + {"o", "oes"}, + {"x", "xes"}, +} + +func init() { + for _, suffix := range singularToPluralSuffixList { + AddPlural(suffix.singular, suffix.plural) + AddSingular(suffix.plural, suffix.singular) + } +} diff --git a/vendor/github.com/gobuffalo/flect/pluralize.go b/vendor/github.com/gobuffalo/flect/pluralize.go new file mode 100644 index 000000000..1b9d43e46 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/pluralize.go @@ -0,0 +1,49 @@ +package flect + +import ( + "strings" + "sync" +) + +var pluralMoot = &sync.RWMutex{} + +// Pluralize returns a plural version of the string +// user = users +// person = people +// datum = data +func Pluralize(s string) string { + return New(s).Pluralize().String() +} + +// Pluralize returns a plural version of the string +// user = users +// person = people +// datum = data +func (i Ident) Pluralize() Ident { + s := i.Original + if len(s) == 0 { + return New("") + } + + pluralMoot.RLock() + defer pluralMoot.RUnlock() + + ls := strings.ToLower(s) + if _, ok := pluralToSingle[ls]; ok { + return i + } + if p, ok := singleToPlural[ls]; ok { + return New(p) + } + for _, r := range pluralRules { + if strings.HasSuffix(ls, r.suffix) { + return New(r.fn(s)) + } + } + + if strings.HasSuffix(ls, "s") { + return i + } + + return New(i.String() + "s") +} diff --git a/vendor/github.com/gobuffalo/flect/rule.go b/vendor/github.com/gobuffalo/flect/rule.go new file mode 100644 index 000000000..dc616b337 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/rule.go @@ -0,0 +1,10 @@ 
+package flect + +type ruleFn func(string) string + +type rule struct { + suffix string + fn ruleFn +} + +func noop(s string) string { return s } diff --git a/vendor/github.com/gobuffalo/flect/singular_rules.go b/vendor/github.com/gobuffalo/flect/singular_rules.go new file mode 100644 index 000000000..b20371b35 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/singular_rules.go @@ -0,0 +1,23 @@ +package flect + +var singularRules = []rule{} + +// AddSingular adds a rule that will replace the given suffix with the replacement suffix. +func AddSingular(ext string, repl string) { + singularMoot.Lock() + defer singularMoot.Unlock() + singularRules = append(singularRules, rule{ + suffix: ext, + fn: func(s string) string { + s = s[:len(s)-len(ext)] + return s + repl + }, + }) + + singularRules = append(singularRules, rule{ + suffix: repl, + fn: func(s string) string { + return s + }, + }) +} diff --git a/vendor/github.com/gobuffalo/flect/singularize.go b/vendor/github.com/gobuffalo/flect/singularize.go new file mode 100644 index 000000000..a0f8545ef --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/singularize.go @@ -0,0 +1,47 @@ +package flect + +import ( + "strings" + "sync" +) + +var singularMoot = &sync.RWMutex{} + +// Singularize returns a singular version of the string +// users = user +// data = datum +// people = person +func Singularize(s string) string { + return New(s).Singularize().String() +} + +// Singularize returns a singular version of the string +// users = user +// data = datum +// people = person +func (i Ident) Singularize() Ident { + s := i.Original + if len(s) == 0 { + return i + } + + singularMoot.RLock() + defer singularMoot.RUnlock() + ls := strings.ToLower(s) + if p, ok := pluralToSingle[ls]; ok { + return New(p) + } + if _, ok := singleToPlural[ls]; ok { + return i + } + for _, r := range singularRules { + if strings.HasSuffix(ls, r.suffix) { + return New(r.fn(s)) + } + } + + if strings.HasSuffix(s, "s") { + return New(s[:len(s)-1]) + } + return i +} diff --git a/vendor/github.com/gobuffalo/flect/titleize.go b/vendor/github.com/gobuffalo/flect/titleize.go new file mode 100644 index 000000000..cbbf08a5a --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/titleize.go @@ -0,0 +1,30 @@ +package flect + +import ( + "strings" + "unicode" +) + +// Titleize will capitalize the start of each part +// "Nice to see you!" = "Nice To See You!" +// "i've read a book! have you?" = "I've Read A Book! Have You?" +// "This is `code` ok" = "This Is `code` OK" +func Titleize(s string) string { + return New(s).Titleize().String() +} + +// Titleize will capitalize the start of each part +// "Nice to see you!" = "Nice To See You!" +// "i've read a book! have you?" = "I've Read A Book! Have You?" +// "This is `code` ok" = "This Is `code` OK" +func (i Ident) Titleize() Ident { + var parts []string + for _, part := range i.Parts { + x := string(unicode.ToTitle(rune(part[0]))) + if len(part) > 1 { + x += part[1:] + } + parts = append(parts, x) + } + return New(strings.Join(parts, " ")) +} diff --git a/vendor/github.com/gobuffalo/flect/underscore.go b/vendor/github.com/gobuffalo/flect/underscore.go new file mode 100644 index 000000000..b92488aa0 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/underscore.go @@ -0,0 +1,34 @@ +package flect + +import ( + "strings" + "unicode" +) + +// Underscore a string +// bob dylan = bob_dylan +// Nice to see you! 
= nice_to_see_you +// widgetID = widget_id +func Underscore(s string) string { + return New(s).Underscore().String() +} + +// Underscore a string +// bob dylan = bob_dylan +// Nice to see you! = nice_to_see_you +// widgetID = widget_id +func (i Ident) Underscore() Ident { + var out []string + for _, part := range i.Parts { + var x string + for _, c := range part { + if unicode.IsLetter(c) || unicode.IsDigit(c) { + x += string(c) + } + } + if x != "" { + out = append(out, x) + } + } + return New(strings.ToLower(strings.Join(out, "_"))) +} diff --git a/vendor/github.com/gobuffalo/flect/version.go b/vendor/github.com/gobuffalo/flect/version.go new file mode 100644 index 000000000..9624d5df2 --- /dev/null +++ b/vendor/github.com/gobuffalo/flect/version.go @@ -0,0 +1,4 @@ +package flect + +//Version holds Flect version number +const Version = "v0.1.6" diff --git a/vendor/github.com/golang/groupcache/lru/lru.go b/vendor/github.com/golang/groupcache/lru/lru.go index cdfe2991f..eac1c7664 100644 --- a/vendor/github.com/golang/groupcache/lru/lru.go +++ b/vendor/github.com/golang/groupcache/lru/lru.go @@ -25,7 +25,7 @@ type Cache struct { // an item is evicted. Zero means no limit. MaxEntries int - // OnEvicted optionally specificies a callback function to be + // OnEvicted optionally specifies a callback function to be // executed when an entry is purged from the cache. OnEvicted func(key Key, value interface{}) @@ -119,3 +119,15 @@ func (c *Cache) Len() int { } return c.ll.Len() } + +// Clear purges all stored items from the cache. +func (c *Cache) Clear() { + if c.OnEvicted != nil { + for _, e := range c.cache { + kv := e.Value.(*entry) + c.OnEvicted(kv.key, kv.value) + } + } + c.ll = nil + c.cache = nil +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go index 5351f36f3..4fd44c45e 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -7105,15 +7105,15 @@ func (m *Any) ToRawInfo() interface{} { // ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. func (m *ApiKeySecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) - } + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7129,9 +7129,11 @@ func (m *ApiKeySecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m == nil { + return info } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7147,21 +7149,21 @@ func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. func (m *BodyParameter) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) - } - if m.In != "" { - info = append(info, yaml.MapItem{Key: "in", Value: m.In}) - } + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) if m.Required != false { info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } - if m.Schema != nil { - info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) - } + // always include this required field. + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { @@ -7175,6 +7177,9 @@ func (m *BodyParameter) ToRawInfo() interface{} { // ToRawInfo returns a description of Contact suitable for JSON or YAML export. func (m *Contact) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7196,6 +7201,9 @@ func (m *Contact) ToRawInfo() interface{} { // ToRawInfo returns a description of Default suitable for JSON or YAML export. func (m *Default) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -7208,6 +7216,9 @@ func (m *Default) ToRawInfo() interface{} { // ToRawInfo returns a description of Definitions suitable for JSON or YAML export. func (m *Definitions) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -7220,12 +7231,13 @@ func (m *Definitions) ToRawInfo() interface{} { // ToRawInfo returns a description of Document suitable for JSON or YAML export. func (m *Document) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Swagger != "" { - info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) - } - if m.Info != nil { - info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Host != "" { info = append(info, yaml.MapItem{Key: "host", Value: m.Host}) @@ -7242,9 +7254,8 @@ func (m *Document) ToRawInfo() interface{} { if len(m.Produces) != 0 { info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) } - if m.Paths != nil { - info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) - } + // always include this required field. + info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Definitions != nil { info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) @@ -7294,6 +7305,9 @@ func (m *Document) ToRawInfo() interface{} { // ToRawInfo returns a description of Examples suitable for JSON or YAML export. func (m *Examples) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -7306,12 +7320,14 @@ func (m *Examples) ToRawInfo() interface{} { // ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. func (m *ExternalDocs) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } - if m.Url != "" { - info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) - } + // always include this required field. + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) if m.VendorExtension != nil { for _, item := range m.VendorExtension { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -7324,6 +7340,9 @@ func (m *ExternalDocs) ToRawInfo() interface{} { // ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. func (m *FileSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Format != "" { info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } @@ -7340,9 +7359,8 @@ func (m *FileSchema) ToRawInfo() interface{} { if len(m.Required) != 0 { info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) if m.ReadOnly != false { info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) } @@ -7366,6 +7384,9 @@ func (m *FileSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Required != false { info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } @@ -7451,9 +7472,11 @@ func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of Header suitable for JSON or YAML export. 
func (m *Header) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) if m.Format != "" { info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } @@ -7524,6 +7547,9 @@ func (m *Header) ToRawInfo() interface{} { // ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Required != false { info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } @@ -7606,6 +7632,9 @@ func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of Headers suitable for JSON or YAML export. func (m *Headers) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -7618,12 +7647,13 @@ func (m *Headers) ToRawInfo() interface{} { // ToRawInfo returns a description of Info suitable for JSON or YAML export. func (m *Info) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Title != "" { - info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) - } - if m.Version != "" { - info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7650,6 +7680,9 @@ func (m *Info) ToRawInfo() interface{} { // ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. func (m *ItemsItem) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if len(m.Schema) != 0 { items := make([]interface{}, 0) for _, item := range m.Schema { @@ -7664,9 +7697,11 @@ func (m *ItemsItem) ToRawInfo() interface{} { // ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. func (m *JsonReference) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.XRef != "" { - info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7676,9 +7711,11 @@ func (m *JsonReference) ToRawInfo() interface{} { // ToRawInfo returns a description of License suitable for JSON or YAML export. func (m *License) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) if m.Url != "" { info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } @@ -7694,6 +7731,9 @@ func (m *License) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. 
func (m *NamedAny) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7704,6 +7744,9 @@ func (m *NamedAny) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. func (m *NamedHeader) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7714,6 +7757,9 @@ func (m *NamedHeader) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. func (m *NamedParameter) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7724,6 +7770,9 @@ func (m *NamedParameter) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. func (m *NamedPathItem) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7734,6 +7783,9 @@ func (m *NamedPathItem) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. func (m *NamedResponse) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7744,6 +7796,9 @@ func (m *NamedResponse) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. func (m *NamedResponseValue) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7754,6 +7809,9 @@ func (m *NamedResponseValue) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. func (m *NamedSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7764,6 +7822,9 @@ func (m *NamedSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7774,6 +7835,9 @@ func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedString suitable for JSON or YAML export. func (m *NamedString) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7786,6 +7850,9 @@ func (m *NamedString) ToRawInfo() interface{} { // ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. func (m *NamedStringArray) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } @@ -7823,22 +7890,21 @@ func (m *NonBodyParameter) ToRawInfo() interface{} { // ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. 
func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) if m.Scopes != nil { info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) - } - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) - } + // always include this required field. + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7854,19 +7920,19 @@ func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) if m.Scopes != nil { info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) - } + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7882,19 +7948,19 @@ func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. 
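+ // ("flow" is a required field for oauth2 security schemes in OpenAPI 2.0, so it is written even when unset.)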
+ info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) if m.Scopes != nil { info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) - } + // always include this required field. + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7910,19 +7976,19 @@ func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Type != "" { - info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) - } - if m.Flow != "" { - info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) if m.Scopes != nil { info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - if m.TokenUrl != "" { - info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) - } + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -7938,6 +8004,9 @@ func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { // ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. func (m *Oauth2Scopes) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} return info } @@ -7945,6 +8014,9 @@ func (m *Oauth2Scopes) ToRawInfo() interface{} { // ToRawInfo returns a description of Operation suitable for JSON or YAML export. func (m *Operation) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if len(m.Tags) != 0 { info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) } @@ -7975,9 +8047,8 @@ func (m *Operation) ToRawInfo() interface{} { info = append(info, yaml.MapItem{Key: "parameters", Value: items}) } // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} - if m.Responses != nil { - info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) - } + // always include this required field. + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Schemes) != 0 { info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) @@ -8022,6 +8093,9 @@ func (m *Parameter) ToRawInfo() interface{} { // ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. 
func (m *ParameterDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8051,6 +8125,9 @@ func (m *ParametersItem) ToRawInfo() interface{} { // ToRawInfo returns a description of PathItem suitable for JSON or YAML export. func (m *PathItem) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.XRef != "" { info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } @@ -8102,9 +8179,11 @@ func (m *PathItem) ToRawInfo() interface{} { // ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. func (m *PathParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Required != false { - info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) if m.In != "" { info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } @@ -8184,6 +8263,9 @@ func (m *PathParameterSubSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of Paths suitable for JSON or YAML export. func (m *Paths) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.VendorExtension != nil { for _, item := range m.VendorExtension { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8202,6 +8284,9 @@ func (m *Paths) ToRawInfo() interface{} { // ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. func (m *PrimitivesItems) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Type != "" { info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } @@ -8272,6 +8357,9 @@ func (m *PrimitivesItems) ToRawInfo() interface{} { // ToRawInfo returns a description of Properties suitable for JSON or YAML export. func (m *Properties) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8284,6 +8372,9 @@ func (m *Properties) ToRawInfo() interface{} { // ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. func (m *QueryParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Required != false { info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } @@ -8369,9 +8460,11 @@ func (m *QueryParameterSubSchema) ToRawInfo() interface{} { // ToRawInfo returns a description of Response suitable for JSON or YAML export. func (m *Response) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Description != "" { - info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) if m.Schema != nil { info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) } @@ -8396,6 +8489,9 @@ func (m *Response) ToRawInfo() interface{} { // ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. 
func (m *ResponseDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8425,6 +8521,9 @@ func (m *ResponseValue) ToRawInfo() interface{} { // ToRawInfo returns a description of Responses suitable for JSON or YAML export. func (m *Responses) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.ResponseCode != nil { for _, item := range m.ResponseCode { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8443,6 +8542,9 @@ func (m *Responses) ToRawInfo() interface{} { // ToRawInfo returns a description of Schema suitable for JSON or YAML export. func (m *Schema) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.XRef != "" { info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } @@ -8588,6 +8690,9 @@ func (m *SchemaItem) ToRawInfo() interface{} { // ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. func (m *SecurityDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8637,6 +8742,9 @@ func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { // ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. func (m *SecurityRequirement) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8654,9 +8762,11 @@ func (m *StringArray) ToRawInfo() interface{} { // ToRawInfo returns a description of Tag suitable for JSON or YAML export. func (m *Tag) ToRawInfo() interface{} { info := yaml.MapSlice{} - if m.Name != "" { - info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + if m == nil { + return info } + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) if m.Description != "" { info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } @@ -8676,6 +8786,9 @@ func (m *Tag) ToRawInfo() interface{} { // ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. func (m *TypeItem) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if len(m.Value) != 0 { info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) } @@ -8685,6 +8798,9 @@ func (m *TypeItem) ToRawInfo() interface{} { // ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. func (m *VendorExtension) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) @@ -8697,6 +8813,9 @@ func (m *VendorExtension) ToRawInfo() interface{} { // ToRawInfo returns a description of Xml suitable for JSON or YAML export. 
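Every ToRawInfo in this file, the Xml variant below included, now opens with the same two-line nil guard and emits schema-required keys unconditionally. A minimal standalone sketch of that pattern, using a hypothetical license type rather than anything from this package:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// license stands in for a generated message with one required and one optional field.
type license struct {
	Name string // required by the schema
	Url  string // optional
}

// toRawInfo mirrors the guarded serializer pattern above: nil receivers yield an
// empty MapSlice, required keys are always appended, optional keys only when set.
func (m *license) toRawInfo() interface{} {
	info := yaml.MapSlice{}
	if m == nil {
		return info
	}
	// always include this required field.
	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
	if m.Url != "" {
		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
	}
	return info
}

func main() {
	var missing *license
	fmt.Println(missing.toRawInfo())              // [] -- no panic on the nil receiver
	fmt.Println((&license{Name: ""}).toRawInfo()) // [{name }] -- required key survives as a zero value
}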
func (m *Xml) ToRawInfo() interface{} { info := yaml.MapSlice{} + if m == nil { + return info + } if m.Name != "" { info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go index a030fa676..6199e7cb3 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -1,80 +1,14 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: OpenAPIv2/OpenAPIv2.proto -/* -Package openapi_v2 is a generated protocol buffer package. - -It is generated from these files: - OpenAPIv2/OpenAPIv2.proto - -It has these top-level messages: - AdditionalPropertiesItem - Any - ApiKeySecurity - BasicAuthenticationSecurity - BodyParameter - Contact - Default - Definitions - Document - Examples - ExternalDocs - FileSchema - FormDataParameterSubSchema - Header - HeaderParameterSubSchema - Headers - Info - ItemsItem - JsonReference - License - NamedAny - NamedHeader - NamedParameter - NamedPathItem - NamedResponse - NamedResponseValue - NamedSchema - NamedSecurityDefinitionsItem - NamedString - NamedStringArray - NonBodyParameter - Oauth2AccessCodeSecurity - Oauth2ApplicationSecurity - Oauth2ImplicitSecurity - Oauth2PasswordSecurity - Oauth2Scopes - Operation - Parameter - ParameterDefinitions - ParametersItem - PathItem - PathParameterSubSchema - Paths - PrimitivesItems - Properties - QueryParameterSubSchema - Response - ResponseDefinitions - ResponseValue - Responses - Schema - SchemaItem - SecurityDefinitions - SecurityDefinitionsItem - SecurityRequirement - StringArray - Tag - TypeItem - VendorExtension - Xml -*/ package openapi_v2 -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/any" +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -85,32 +19,57 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
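From here the file is a wholesale regeneration with a newer protoc-gen-go: the version sentinel moves from ProtoPackageIsVersion2 to 3, every message gains XXX_NoUnkeyedLiteral/XXX_unrecognized/XXX_sizecache bookkeeping fields and proto3 struct tags, and the hand-rolled oneof marshaler/unmarshaler/sizer trio collapses into XXX_OneofWrappers. The exported accessors are untouched, so a caller switching on the oneof compiles the same before and after the regeneration; a sketch under that assumption (the describe helper is hypothetical, the types are from this package):

package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
)

// describe switches on the generated oneof wrapper types, the public surface
// that the XXX_OneofFuncs -> XXX_OneofWrappers change leaves intact.
func describe(item *openapi_v2.AdditionalPropertiesItem) string {
	switch x := item.GetOneof().(type) {
	case *openapi_v2.AdditionalPropertiesItem_Schema:
		return fmt.Sprintf("schema: %v", x.Schema)
	case *openapi_v2.AdditionalPropertiesItem_Boolean:
		return fmt.Sprintf("boolean: %t", x.Boolean)
	default:
		return "unset"
	}
}

func main() {
	item := &openapi_v2.AdditionalPropertiesItem{
		Oneof: &openapi_v2.AdditionalPropertiesItem_Boolean{Boolean: true},
	}
	fmt.Println(describe(item)) // boolean: true
}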
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package type AdditionalPropertiesItem struct { // Types that are valid to be assigned to Oneof: // *AdditionalPropertiesItem_Schema // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } -func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } -func (*AdditionalPropertiesItem) ProtoMessage() {} -func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } +func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } +func (*AdditionalPropertiesItem) ProtoMessage() {} +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{0} +} + +func (m *AdditionalPropertiesItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AdditionalPropertiesItem.Unmarshal(m, b) +} +func (m *AdditionalPropertiesItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AdditionalPropertiesItem.Marshal(b, m, deterministic) +} +func (m *AdditionalPropertiesItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_AdditionalPropertiesItem.Merge(m, src) +} +func (m *AdditionalPropertiesItem) XXX_Size() int { + return xxx_messageInfo_AdditionalPropertiesItem.Size(m) +} +func (m *AdditionalPropertiesItem) XXX_DiscardUnknown() { + xxx_messageInfo_AdditionalPropertiesItem.DiscardUnknown(m) +} + +var xxx_messageInfo_AdditionalPropertiesItem proto.InternalMessageInfo type isAdditionalPropertiesItem_Oneof interface { isAdditionalPropertiesItem_Oneof() } type AdditionalPropertiesItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` } + type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` } -func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} + func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { @@ -134,90 +93,48 @@ func (m *AdditionalPropertiesItem) GetBoolean() bool { return false } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*AdditionalPropertiesItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*AdditionalPropertiesItem_Schema)(nil), (*AdditionalPropertiesItem_Boolean)(nil), } } -func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*AdditionalPropertiesItem) - // oneof - switch x := m.Oneof.(type) { - case *AdditionalPropertiesItem_Schema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Schema); err != nil { - return err - } - case *AdditionalPropertiesItem_Boolean: - t := uint64(0) - if x.Boolean { - t = 1 - } - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(t) - case nil: - default: - return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*AdditionalPropertiesItem) - switch tag { - case 1: // oneof.schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Schema) - err := b.DecodeMessage(msg) - m.Oneof = &AdditionalPropertiesItem_Schema{msg} - return true, err - case 2: // oneof.boolean - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} - return true, err - default: - return false, nil - } -} - -func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*AdditionalPropertiesItem) - // oneof - switch x := m.Oneof.(type) { - case *AdditionalPropertiesItem_Schema: - s := proto.Size(x.Schema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *AdditionalPropertiesItem_Boolean: - n += proto.SizeVarint(2<<3 | proto.WireVarint) - n += 1 - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type Any struct { + Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -type Any struct { - Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{1} } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} -func (m *Any) GetValue() *google_protobuf.Any { +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetValue() *any.Any { if m != nil { return m.Value } @@ -232,17 +149,40 @@ func (m *Any) GetYaml() string 
{ } type ApiKeySecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } +func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } +func (*ApiKeySecurity) ProtoMessage() {} +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{2} +} + +func (m *ApiKeySecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApiKeySecurity.Unmarshal(m, b) +} +func (m *ApiKeySecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApiKeySecurity.Marshal(b, m, deterministic) +} +func (m *ApiKeySecurity) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApiKeySecurity.Merge(m, src) +} +func (m *ApiKeySecurity) XXX_Size() int { + return xxx_messageInfo_ApiKeySecurity.Size(m) +} +func (m *ApiKeySecurity) XXX_DiscardUnknown() { + xxx_messageInfo_ApiKeySecurity.DiscardUnknown(m) } -func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } -func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } -func (*ApiKeySecurity) ProtoMessage() {} -func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_ApiKeySecurity proto.InternalMessageInfo func (m *ApiKeySecurity) GetType() string { if m != nil { @@ -280,15 +220,38 @@ func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { } type BasicAuthenticationSecurity struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } +func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } +func (*BasicAuthenticationSecurity) ProtoMessage() {} +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{3} +} + +func (m 
*BasicAuthenticationSecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BasicAuthenticationSecurity.Unmarshal(m, b) +} +func (m *BasicAuthenticationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BasicAuthenticationSecurity.Marshal(b, m, deterministic) +} +func (m *BasicAuthenticationSecurity) XXX_Merge(src proto.Message) { + xxx_messageInfo_BasicAuthenticationSecurity.Merge(m, src) +} +func (m *BasicAuthenticationSecurity) XXX_Size() int { + return xxx_messageInfo_BasicAuthenticationSecurity.Size(m) +} +func (m *BasicAuthenticationSecurity) XXX_DiscardUnknown() { + xxx_messageInfo_BasicAuthenticationSecurity.DiscardUnknown(m) } -func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } -func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } -func (*BasicAuthenticationSecurity) ProtoMessage() {} -func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_BasicAuthenticationSecurity proto.InternalMessageInfo func (m *BasicAuthenticationSecurity) GetType() string { if m != nil { @@ -313,21 +276,44 @@ func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { type BodyParameter struct { // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // Determines the location of the parameter. - In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` // Determines whether or not this parameter is required or optional. 
- Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` - Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *BodyParameter) Reset() { *m = BodyParameter{} } -func (m *BodyParameter) String() string { return proto.CompactTextString(m) } -func (*BodyParameter) ProtoMessage() {} -func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } +func (m *BodyParameter) Reset() { *m = BodyParameter{} } +func (m *BodyParameter) String() string { return proto.CompactTextString(m) } +func (*BodyParameter) ProtoMessage() {} +func (*BodyParameter) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{4} +} + +func (m *BodyParameter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BodyParameter.Unmarshal(m, b) +} +func (m *BodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BodyParameter.Marshal(b, m, deterministic) +} +func (m *BodyParameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_BodyParameter.Merge(m, src) +} +func (m *BodyParameter) XXX_Size() int { + return xxx_messageInfo_BodyParameter.Size(m) +} +func (m *BodyParameter) XXX_DiscardUnknown() { + xxx_messageInfo_BodyParameter.DiscardUnknown(m) +} + +var xxx_messageInfo_BodyParameter proto.InternalMessageInfo func (m *BodyParameter) GetDescription() string { if m != nil { @@ -374,18 +360,41 @@ func (m *BodyParameter) GetVendorExtension() []*NamedAny { // Contact information for the owners of the API. type Contact struct { // The identifying name of the contact person/organization. - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The URL pointing to the contact information. - Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` // The email address of the contact person/organization. 
- Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Contact) Reset() { *m = Contact{} } -func (m *Contact) String() string { return proto.CompactTextString(m) } -func (*Contact) ProtoMessage() {} -func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{5} +} + +func (m *Contact) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Contact.Unmarshal(m, b) +} +func (m *Contact) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Contact.Marshal(b, m, deterministic) +} +func (m *Contact) XXX_Merge(src proto.Message) { + xxx_messageInfo_Contact.Merge(m, src) +} +func (m *Contact) XXX_Size() int { + return xxx_messageInfo_Contact.Size(m) +} +func (m *Contact) XXX_DiscardUnknown() { + xxx_messageInfo_Contact.DiscardUnknown(m) +} + +var xxx_messageInfo_Contact proto.InternalMessageInfo func (m *Contact) GetName() string { if m != nil { @@ -416,13 +425,36 @@ func (m *Contact) GetVendorExtension() []*NamedAny { } type Default struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{6} +} + +func (m *Default) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Default.Unmarshal(m, b) +} +func (m *Default) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Default.Marshal(b, m, deterministic) +} +func (m *Default) XXX_Merge(src proto.Message) { + xxx_messageInfo_Default.Merge(m, src) +} +func (m *Default) XXX_Size() int { + return xxx_messageInfo_Default.Size(m) +} +func (m *Default) XXX_DiscardUnknown() { + xxx_messageInfo_Default.DiscardUnknown(m) } -func (m *Default) Reset() { *m = Default{} } -func (m *Default) String() string { return proto.CompactTextString(m) } -func (*Default) ProtoMessage() {} -func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +var xxx_messageInfo_Default proto.InternalMessageInfo func (m *Default) GetAdditionalProperties() []*NamedAny { if m != nil { @@ -433,13 +465,36 @@ func (m *Default) GetAdditionalProperties() []*NamedAny { // One or more JSON objects describing the schemas being consumed and produced by the API. 
type Definitions struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Definitions) Reset() { *m = Definitions{} } -func (m *Definitions) String() string { return proto.CompactTextString(m) } -func (*Definitions) ProtoMessage() {} -func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (m *Definitions) Reset() { *m = Definitions{} } +func (m *Definitions) String() string { return proto.CompactTextString(m) } +func (*Definitions) ProtoMessage() {} +func (*Definitions) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{7} +} + +func (m *Definitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Definitions.Unmarshal(m, b) +} +func (m *Definitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Definitions.Marshal(b, m, deterministic) +} +func (m *Definitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Definitions.Merge(m, src) +} +func (m *Definitions) XXX_Size() int { + return xxx_messageInfo_Definitions.Size(m) +} +func (m *Definitions) XXX_DiscardUnknown() { + xxx_messageInfo_Definitions.DiscardUnknown(m) +} + +var xxx_messageInfo_Definitions proto.InternalMessageInfo func (m *Definitions) GetAdditionalProperties() []*NamedSchema { if m != nil { @@ -450,33 +505,56 @@ func (m *Definitions) GetAdditionalProperties() []*NamedSchema { type Document struct { // The Swagger version of this document. - Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` - Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` // The host (name or ip) of the API. Example: 'swagger.io' - Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` // The base path to the API. Example: '/api'. - BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` + Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"` // A list of MIME types accepted by the API. - Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` // A list of MIME types the API can produce. 
- Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` - Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` - Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` - Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` - Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` - SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` - Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Document) Reset() { *m = Document{} } -func (m *Document) String() string { return proto.CompactTextString(m) } -func (*Document) ProtoMessage() {} -func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{8} +} + +func (m *Document) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Document.Unmarshal(m, b) +} +func (m *Document) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Document.Marshal(b, m, deterministic) +} +func (m *Document) XXX_Merge(src proto.Message) { + xxx_messageInfo_Document.Merge(m, src) +} +func (m *Document) XXX_Size() int { + return xxx_messageInfo_Document.Size(m) +} +func (m *Document) XXX_DiscardUnknown() { + xxx_messageInfo_Document.DiscardUnknown(m) +} + +var xxx_messageInfo_Document proto.InternalMessageInfo func (m *Document) GetSwagger() string { if m != nil { @@ -591,13 +669,36 @@ func (m *Document) GetVendorExtension() []*NamedAny { } type Examples struct { - AdditionalProperties 
[]*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Examples) Reset() { *m = Examples{} }
+func (m *Examples) String() string { return proto.CompactTextString(m) }
+func (*Examples) ProtoMessage() {}
+func (*Examples) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{9}
+}
+
+func (m *Examples) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Examples.Unmarshal(m, b)
+}
+func (m *Examples) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Examples.Marshal(b, m, deterministic)
+}
+func (m *Examples) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Examples.Merge(m, src)
+}
+func (m *Examples) XXX_Size() int {
+	return xxx_messageInfo_Examples.Size(m)
+}
+func (m *Examples) XXX_DiscardUnknown() {
+	xxx_messageInfo_Examples.DiscardUnknown(m)
 }
 
-func (m *Examples) Reset() { *m = Examples{} }
-func (m *Examples) String() string { return proto.CompactTextString(m) }
-func (*Examples) ProtoMessage() {}
-func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+var xxx_messageInfo_Examples proto.InternalMessageInfo
 
 func (m *Examples) GetAdditionalProperties() []*NamedAny {
 	if m != nil {
@@ -608,15 +709,38 @@ func (m *Examples) GetAdditionalProperties() []*NamedAny {
 
 // information about external documentation
 type ExternalDocs struct {
-	Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
-	Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *ExternalDocs) Reset() { *m = ExternalDocs{} }
-func (m *ExternalDocs) String() string { return proto.CompactTextString(m) }
-func (*ExternalDocs) ProtoMessage() {}
-func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (m *ExternalDocs) Reset() { *m = ExternalDocs{} }
+func (m *ExternalDocs) String() string { return proto.CompactTextString(m) }
+func (*ExternalDocs) ProtoMessage() {}
+func (*ExternalDocs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{10}
+}
+
+func (m *ExternalDocs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExternalDocs.Unmarshal(m, b)
+}
+func (m *ExternalDocs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExternalDocs.Marshal(b, m, deterministic)
+}
+func (m *ExternalDocs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExternalDocs.Merge(m, src)
+}
+func (m *ExternalDocs) XXX_Size() int {
+	return xxx_messageInfo_ExternalDocs.Size(m)
+}
+func (m *ExternalDocs) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExternalDocs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExternalDocs proto.InternalMessageInfo
 
 func (m *ExternalDocs) GetDescription() string {
 	if m != nil {
@@ -641,22 +765,45 @@ func (m *ExternalDocs) GetVendorExtension() []*NamedAny {
 
 // A deterministic version of a JSON Schema object.
 type FileSchema struct {
-	Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"`
-	Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"`
-	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
-	Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"`
-	Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"`
-	Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"`
-	ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"`
-	ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
-	Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *FileSchema) Reset() { *m = FileSchema{} }
-func (m *FileSchema) String() string { return proto.CompactTextString(m) }
-func (*FileSchema) ProtoMessage() {}
-func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+	Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"`
+	Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"`
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"`
+	Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"`
+	Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
+	ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FileSchema) Reset() { *m = FileSchema{} }
+func (m *FileSchema) String() string { return proto.CompactTextString(m) }
+func (*FileSchema) ProtoMessage() {}
+func (*FileSchema) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{11}
+}
+
+func (m *FileSchema) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileSchema.Unmarshal(m, b)
+}
+func (m *FileSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileSchema.Marshal(b, m, deterministic)
+}
+func (m *FileSchema) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileSchema.Merge(m, src)
+}
+func (m *FileSchema) XXX_Size() int {
+	return xxx_messageInfo_FileSchema.Size(m)
+}
+func (m *FileSchema) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileSchema.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileSchema proto.InternalMessageInfo
 
 func (m *FileSchema) GetFormat() string {
 	if m != nil {
@@ -730,39 +877,62 @@ func (m *FileSchema) GetVendorExtension() []*NamedAny {
 
 type FormDataParameterSubSchema struct {
 	// Determines whether or not this parameter is required or optional.
-	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
+	Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
 	// Determines the location of the parameter.
-	In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"`
+	In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"`
 	// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
-	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
 	// The name of the parameter.
-	Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
 	// allows sending a parameter by name only or with an empty value.
-	AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"`
-	Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"`
-	Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"`
-	Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"`
-	CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
-	Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"`
-	Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"`
-	ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
-	Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"`
-	ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
-	MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
-	MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
-	Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"`
-	MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
-	MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
-	UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
-	Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"`
-	MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} }
-func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) }
-func (*FormDataParameterSubSchema) ProtoMessage() {}
-func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+	AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"`
+	Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"`
+	Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"`
+	Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"`
+	CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+	Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"`
+	Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} }
+func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) }
+func (*FormDataParameterSubSchema) ProtoMessage() {}
+func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{12}
+}
+
+func (m *FormDataParameterSubSchema) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FormDataParameterSubSchema.Unmarshal(m, b)
+}
+func (m *FormDataParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FormDataParameterSubSchema.Marshal(b, m, deterministic)
+}
+func (m *FormDataParameterSubSchema) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FormDataParameterSubSchema.Merge(m, src)
+}
+func (m *FormDataParameterSubSchema) XXX_Size() int {
+	return xxx_messageInfo_FormDataParameterSubSchema.Size(m)
+}
+func (m *FormDataParameterSubSchema) XXX_DiscardUnknown() {
+	xxx_messageInfo_FormDataParameterSubSchema.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FormDataParameterSubSchema proto.InternalMessageInfo
 
 func (m *FormDataParameterSubSchema) GetRequired() bool {
 	if m != nil {
@@ -926,31 +1096,54 @@ func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny {
 }
 
 type Header struct {
-	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"`
-	Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"`
-	CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
-	Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"`
-	Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"`
-	ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
-	Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"`
-	ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
-	MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
-	MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
-	Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"`
-	MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
-	MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
-	UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
-	Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"`
-	MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
-	Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *Header) Reset() { *m = Header{} }
-func (m *Header) String() string { return proto.CompactTextString(m) }
-func (*Header) ProtoMessage() {}
-func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"`
+	Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"`
+	CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+	Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"`
+	Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Header) Reset() { *m = Header{} }
+func (m *Header) String() string { return proto.CompactTextString(m) }
+func (*Header) ProtoMessage() {}
+func (*Header) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{13}
+}
+
+func (m *Header) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Header.Unmarshal(m, b)
+}
+func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Header.Marshal(b, m, deterministic)
+}
+func (m *Header) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Header.Merge(m, src)
+}
+func (m *Header) XXX_Size() int {
+	return xxx_messageInfo_Header.Size(m)
+}
+func (m *Header) XXX_DiscardUnknown() {
+	xxx_messageInfo_Header.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Header proto.InternalMessageInfo
 
 func (m *Header) GetType() string {
 	if m != nil {
@@ -1087,37 +1280,60 @@ func (m *Header) GetVendorExtension() []*NamedAny {
 
 type HeaderParameterSubSchema struct {
 	// Determines whether or not this parameter is required or optional.
-	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
+	Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"`
 	// Determines the location of the parameter.
-	In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"`
+	In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"`
 	// A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed.
-	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
 	// The name of the parameter.
-	Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
-	Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"`
-	Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"`
-	Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"`
-	CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
-	Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"`
-	Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"`
-	ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
-	Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"`
-	ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
-	MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
-	MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
-	Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"`
-	MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
-	MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
-	UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
-	Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"`
-	MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
-}
-
-func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} }
-func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) }
-func (*HeaderParameterSubSchema) ProtoMessage() {}
-func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+	Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+	Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"`
+	Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"`
+	Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"`
+	CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"`
+	Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"`
+	Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} }
+func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) }
+func (*HeaderParameterSubSchema) ProtoMessage() {}
+func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{14}
+}
+
+func (m *HeaderParameterSubSchema) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HeaderParameterSubSchema.Unmarshal(m, b)
+}
+func (m *HeaderParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HeaderParameterSubSchema.Marshal(b, m, deterministic)
+}
+func (m *HeaderParameterSubSchema) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HeaderParameterSubSchema.Merge(m, src)
+}
+func (m *HeaderParameterSubSchema) XXX_Size() int {
+	return xxx_messageInfo_HeaderParameterSubSchema.Size(m)
+}
+func (m *HeaderParameterSubSchema) XXX_DiscardUnknown() {
+	xxx_messageInfo_HeaderParameterSubSchema.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HeaderParameterSubSchema proto.InternalMessageInfo
 
 func (m *HeaderParameterSubSchema) GetRequired() bool {
 	if m != nil {
@@ -1274,13 +1490,36 @@ func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny {
 }
 
 type Headers struct {
-	AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+	AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Headers) Reset() { *m = Headers{} }
+func (m *Headers) String() string { return proto.CompactTextString(m) }
+func (*Headers) ProtoMessage() {}
+func (*Headers) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{15}
+}
+
+func (m *Headers) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Headers.Unmarshal(m, b)
+}
+func (m *Headers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Headers.Marshal(b, m, deterministic)
+}
+func (m *Headers) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Headers.Merge(m, src)
+}
+func (m *Headers) XXX_Size() int {
+	return xxx_messageInfo_Headers.Size(m)
+}
+func (m *Headers) XXX_DiscardUnknown() {
+	xxx_messageInfo_Headers.DiscardUnknown(m)
 }
 
-func (m *Headers) Reset() { *m = Headers{} }
-func (m *Headers) String() string { return proto.CompactTextString(m) }
-func (*Headers) ProtoMessage() {}
-func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+var xxx_messageInfo_Headers proto.InternalMessageInfo
 
 func (m *Headers) GetAdditionalProperties() []*NamedHeader {
 	if m != nil {
@@ -1292,22 +1531,45 @@ func (m *Headers) GetAdditionalProperties() []*NamedHeader {
 
 // General information about the API.
 type Info struct {
 	// A unique and precise title of the API.
-	Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"`
+	Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
 	// A semantic version number of the API.
-	Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
 	// A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed.
-	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
 	// The terms of service for the API.
-	TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"`
-	Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"`
-	License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+	TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+	Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"`
+	License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Info) Reset() { *m = Info{} }
+func (m *Info) String() string { return proto.CompactTextString(m) }
+func (*Info) ProtoMessage() {}
+func (*Info) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{16}
+}
+
+func (m *Info) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Info.Unmarshal(m, b)
+}
+func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Info.Marshal(b, m, deterministic)
+}
+func (m *Info) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Info.Merge(m, src)
+}
+func (m *Info) XXX_Size() int {
+	return xxx_messageInfo_Info.Size(m)
+}
+func (m *Info) XXX_DiscardUnknown() {
+	xxx_messageInfo_Info.DiscardUnknown(m)
 }
 
-func (m *Info) Reset() { *m = Info{} }
-func (m *Info) String() string { return proto.CompactTextString(m) }
-func (*Info) ProtoMessage() {}
-func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+var xxx_messageInfo_Info proto.InternalMessageInfo
 
 func (m *Info) GetTitle() string {
 	if m != nil {
@@ -1359,13 +1621,36 @@ func (m *Info) GetVendorExtension() []*NamedAny {
 }
 
 type ItemsItem struct {
-	Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"`
+	Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *ItemsItem) Reset() { *m = ItemsItem{} }
-func (m *ItemsItem) String() string { return proto.CompactTextString(m) }
-func (*ItemsItem) ProtoMessage() {}
-func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+func (m *ItemsItem) Reset() { *m = ItemsItem{} }
+func (m *ItemsItem) String() string { return proto.CompactTextString(m) }
+func (*ItemsItem) ProtoMessage() {}
+func (*ItemsItem) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{17}
+}
+
+func (m *ItemsItem) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ItemsItem.Unmarshal(m, b)
+}
+func (m *ItemsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ItemsItem.Marshal(b, m, deterministic)
+}
+func (m *ItemsItem) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ItemsItem.Merge(m, src)
+}
+func (m *ItemsItem) XXX_Size() int {
+	return xxx_messageInfo_ItemsItem.Size(m)
+}
+func (m *ItemsItem) XXX_DiscardUnknown() {
+	xxx_messageInfo_ItemsItem.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ItemsItem proto.InternalMessageInfo
 
 func (m *ItemsItem) GetSchema() []*Schema {
 	if m != nil {
@@ -1375,14 +1660,37 @@ func (m *ItemsItem) GetSchema() []*Schema {
 }
 
 type JsonReference struct {
-	XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"`
-	Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
+	XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"`
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *JsonReference) Reset() { *m = JsonReference{} }
-func (m *JsonReference) String() string { return proto.CompactTextString(m) }
-func (*JsonReference) ProtoMessage() {}
-func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (m *JsonReference) Reset() { *m = JsonReference{} }
+func (m *JsonReference) String() string { return proto.CompactTextString(m) }
+func (*JsonReference) ProtoMessage() {}
+func (*JsonReference) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{18}
+}
+
+func (m *JsonReference) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_JsonReference.Unmarshal(m, b)
+}
+func (m *JsonReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_JsonReference.Marshal(b, m, deterministic)
+}
+func (m *JsonReference) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_JsonReference.Merge(m, src)
+}
+func (m *JsonReference) XXX_Size() int {
+	return xxx_messageInfo_JsonReference.Size(m)
+}
+func (m *JsonReference) XXX_DiscardUnknown() {
+	xxx_messageInfo_JsonReference.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_JsonReference proto.InternalMessageInfo
 
 func (m *JsonReference) GetXRef() string {
 	if m != nil {
@@ -1400,16 +1708,39 @@ func (m *JsonReference) GetDescription() string {
 
 type License struct {
 	// The name of the license type. It's encouraged to use an OSI compatible license.
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// The URL pointing to the license.
-	Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *License) Reset() { *m = License{} }
+func (m *License) String() string { return proto.CompactTextString(m) }
+func (*License) ProtoMessage() {}
+func (*License) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{19}
+}
+
+func (m *License) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_License.Unmarshal(m, b)
+}
+func (m *License) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_License.Marshal(b, m, deterministic)
+}
+func (m *License) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_License.Merge(m, src)
+}
+func (m *License) XXX_Size() int {
+	return xxx_messageInfo_License.Size(m)
+}
+func (m *License) XXX_DiscardUnknown() {
+	xxx_messageInfo_License.DiscardUnknown(m)
 }
 
-func (m *License) Reset() { *m = License{} }
-func (m *License) String() string { return proto.CompactTextString(m) }
-func (*License) ProtoMessage() {}
-func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+var xxx_messageInfo_License proto.InternalMessageInfo
 
 func (m *License) GetName() string {
 	if m != nil {
@@ -1435,15 +1766,38 @@ func (m *License) GetVendorExtension() []*NamedAny {
 
 // Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
 type NamedAny struct {
 	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Mapped value
-	Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NamedAny) Reset() { *m = NamedAny{} }
+func (m *NamedAny) String() string { return proto.CompactTextString(m) }
+func (*NamedAny) ProtoMessage() {}
+func (*NamedAny) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{20}
 }
 
-func (m *NamedAny) Reset() { *m = NamedAny{} }
-func (m *NamedAny) String() string { return proto.CompactTextString(m) }
-func (*NamedAny) ProtoMessage() {}
-func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+func (m *NamedAny) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NamedAny.Unmarshal(m, b)
+}
+func (m *NamedAny) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NamedAny.Marshal(b, m, deterministic)
+}
+func (m *NamedAny) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedAny.Merge(m, src)
+}
+func (m *NamedAny) XXX_Size() int {
+	return xxx_messageInfo_NamedAny.Size(m)
+}
+func (m *NamedAny) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedAny.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedAny proto.InternalMessageInfo
 
 func (m *NamedAny) GetName() string {
 	if m != nil {
@@ -1462,15 +1816,38 @@ func (m *NamedAny) GetValue() *Any {
 
 // Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
 type NamedHeader struct {
 	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Mapped value
-	Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *NamedHeader) Reset() { *m = NamedHeader{} }
-func (m *NamedHeader) String() string { return proto.CompactTextString(m) }
-func (*NamedHeader) ProtoMessage() {}
-func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+func (m *NamedHeader) Reset() { *m = NamedHeader{} }
+func (m *NamedHeader) String() string { return proto.CompactTextString(m) }
+func (*NamedHeader) ProtoMessage() {}
+func (*NamedHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{21}
+}
+
+func (m *NamedHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NamedHeader.Unmarshal(m, b)
+}
+func (m *NamedHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NamedHeader.Marshal(b, m, deterministic)
+}
+func (m *NamedHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedHeader.Merge(m, src)
+}
+func (m *NamedHeader) XXX_Size() int {
+	return xxx_messageInfo_NamedHeader.Size(m)
+}
+func (m *NamedHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedHeader proto.InternalMessageInfo
 
 func (m *NamedHeader) GetName() string {
 	if m != nil {
@@ -1489,15 +1866,38 @@ func (m *NamedHeader) GetValue() *Header {
 
 // Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
 type NamedParameter struct {
 	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Mapped value
-	Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *NamedParameter) Reset() { *m = NamedParameter{} }
-func (m *NamedParameter) String() string { return proto.CompactTextString(m) }
-func (*NamedParameter) ProtoMessage() {}
-func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+func (m *NamedParameter) Reset() { *m = NamedParameter{} }
+func (m *NamedParameter) String() string { return proto.CompactTextString(m) }
+func (*NamedParameter) ProtoMessage() {}
+func (*NamedParameter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{22}
+}
+
+func (m *NamedParameter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NamedParameter.Unmarshal(m, b)
+}
+func (m *NamedParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NamedParameter.Marshal(b, m, deterministic)
+}
+func (m *NamedParameter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedParameter.Merge(m, src)
+}
+func (m *NamedParameter) XXX_Size() int {
+	return xxx_messageInfo_NamedParameter.Size(m)
+}
+func (m *NamedParameter) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedParameter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedParameter proto.InternalMessageInfo
 
 func (m *NamedParameter) GetName() string {
 	if m != nil {
@@ -1516,15 +1916,38 @@ func (m *NamedParameter) GetValue() *Parameter {
 
 // Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
 type NamedPathItem struct {
 	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Mapped value
-	Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *NamedPathItem) Reset() { *m = NamedPathItem{} }
-func (m *NamedPathItem) String() string { return proto.CompactTextString(m) }
-func (*NamedPathItem) ProtoMessage() {}
-func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+func (m *NamedPathItem) Reset() { *m = NamedPathItem{} }
+func (m *NamedPathItem) String() string { return proto.CompactTextString(m) }
+func (*NamedPathItem) ProtoMessage() {}
+func (*NamedPathItem) Descriptor() ([]byte, []int) {
	return fileDescriptor_336adc04ae589d92, []int{23}
+}
+
+func (m *NamedPathItem) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NamedPathItem.Unmarshal(m, b)
+}
+func (m *NamedPathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NamedPathItem.Marshal(b, m, deterministic)
+}
+func (m *NamedPathItem) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedPathItem.Merge(m, src)
+}
+func (m *NamedPathItem) XXX_Size() int {
+	return xxx_messageInfo_NamedPathItem.Size(m)
+}
+func (m *NamedPathItem) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedPathItem.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedPathItem proto.InternalMessageInfo
 
 func (m *NamedPathItem) GetName() string {
 	if m != nil {
@@ -1543,15 +1966,38 @@ func (m *NamedPathItem) GetValue() *PathItem {
 
 // Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
 type NamedResponse struct {
 	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Mapped value
-	Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NamedResponse) Reset() { *m = NamedResponse{} }
+func (m *NamedResponse) String() string { return proto.CompactTextString(m) }
+func (*NamedResponse) ProtoMessage() {}
+func (*NamedResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{24}
 }
 
-func (m *NamedResponse) Reset() { *m = NamedResponse{} }
-func (m *NamedResponse) String() string { return proto.CompactTextString(m) }
-func (*NamedResponse) ProtoMessage() {}
-func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+func (m *NamedResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NamedResponse.Unmarshal(m, b)
+}
+func (m *NamedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NamedResponse.Marshal(b, m, deterministic)
+}
+func (m *NamedResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedResponse.Merge(m, src)
+}
+func (m *NamedResponse) XXX_Size() int {
+	return xxx_messageInfo_NamedResponse.Size(m)
+}
+func (m *NamedResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedResponse proto.InternalMessageInfo
 
 func (m *NamedResponse) GetName() string {
 	if m != nil {
@@ -1570,15 +2016,38 @@ func (m *NamedResponse) GetValue() *Response {
 
 // Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
 type NamedResponseValue struct {
 	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Mapped value
-	Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} }
-func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) }
-func (*NamedResponseValue) ProtoMessage() {}
-func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} }
+func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) }
+func (*NamedResponseValue) ProtoMessage() {}
+func (*NamedResponseValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{25}
+}
+
+func (m *NamedResponseValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NamedResponseValue.Unmarshal(m, b)
+}
+func (m *NamedResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NamedResponseValue.Marshal(b, m, deterministic)
+}
+func (m *NamedResponseValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedResponseValue.Merge(m, src)
+}
+func (m *NamedResponseValue) XXX_Size() int {
+	return xxx_messageInfo_NamedResponseValue.Size(m)
+}
+func (m *NamedResponseValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedResponseValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedResponseValue proto.InternalMessageInfo
 
 func (m *NamedResponseValue) GetName() string {
 	if m != nil {
@@ -1597,15 +2066,38 @@ func (m *NamedResponseValue) GetValue() *ResponseValue {
 
 // Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
 type NamedSchema struct {
 	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Mapped value
-	Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NamedSchema) Reset() { *m = NamedSchema{} }
+func (m *NamedSchema) String() string { return proto.CompactTextString(m) }
+func (*NamedSchema) ProtoMessage() {}
+func (*NamedSchema) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{26}
+}
+
+func (m *NamedSchema) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NamedSchema.Unmarshal(m, b)
+}
+func (m *NamedSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NamedSchema.Marshal(b, m, deterministic)
+}
+func (m *NamedSchema) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedSchema.Merge(m, src)
+}
+func (m *NamedSchema) XXX_Size() int {
+	return xxx_messageInfo_NamedSchema.Size(m)
+}
+func (m *NamedSchema) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedSchema.DiscardUnknown(m)
 }
 
-func (m *NamedSchema) Reset() { *m = NamedSchema{} }
-func (m *NamedSchema) String() string { return proto.CompactTextString(m) }
-func (*NamedSchema) ProtoMessage() {}
-func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+var xxx_messageInfo_NamedSchema proto.InternalMessageInfo
 
 func (m *NamedSchema) GetName() string {
 	if m != nil {
@@ -1624,15 +2116,38 @@ func (m *NamedSchema) GetValue() *Schema {
 
 // Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
 type NamedSecurityDefinitionsItem struct {
 	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Mapped value
-	Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} }
-func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) }
-func (*NamedSecurityDefinitionsItem) ProtoMessage() {}
-func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} }
+func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) }
+func (*NamedSecurityDefinitionsItem) ProtoMessage() {}
+func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{27}
+}
+
+func (m *NamedSecurityDefinitionsItem) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NamedSecurityDefinitionsItem.Unmarshal(m, b)
+}
+func (m *NamedSecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NamedSecurityDefinitionsItem.Marshal(b, m, deterministic)
+}
+func (m *NamedSecurityDefinitionsItem) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedSecurityDefinitionsItem.Merge(m, src)
+}
+func (m *NamedSecurityDefinitionsItem) XXX_Size() int {
+	return xxx_messageInfo_NamedSecurityDefinitionsItem.Size(m)
+}
+func (m *NamedSecurityDefinitionsItem) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedSecurityDefinitionsItem.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedSecurityDefinitionsItem proto.InternalMessageInfo
 
 func (m *NamedSecurityDefinitionsItem) GetName() string {
 	if m != nil {
@@ -1651,15 +2166,38 @@ func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem {
 
 // Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
 type NamedString struct {
 	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Mapped value
-	Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NamedString) Reset() { *m = NamedString{} }
+func (m *NamedString) String() string { return proto.CompactTextString(m) }
+func (*NamedString) ProtoMessage() {}
+func (*NamedString) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{28}
+}
+
+func (m *NamedString) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NamedString.Unmarshal(m, b)
+}
+func (m *NamedString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NamedString.Marshal(b, m, deterministic)
+}
+func (m *NamedString) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedString.Merge(m, src)
+}
+func (m *NamedString) XXX_Size() int {
+	return xxx_messageInfo_NamedString.Size(m)
+}
+func (m *NamedString) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedString.DiscardUnknown(m)
 }
 
-func (m *NamedString) Reset() { *m = NamedString{} }
-func (m *NamedString) String() string { return proto.CompactTextString(m) }
-func (*NamedString) ProtoMessage() {}
-func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+var xxx_messageInfo_NamedString proto.InternalMessageInfo
 
 func (m *NamedString) GetName() string {
 	if m != nil {
@@ -1678,15 +2216,38 @@ func (m *NamedString) GetValue() string {
 
 // Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
 type NamedStringArray struct {
 	// Map key
-	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	// Mapped value
-	Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+	Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NamedStringArray) Reset() { *m = NamedStringArray{} }
+func (m *NamedStringArray) String() string { return proto.CompactTextString(m) }
+func (*NamedStringArray) ProtoMessage() {}
+func (*NamedStringArray) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{29}
 }
 
-func (m *NamedStringArray) Reset() { *m = NamedStringArray{} }
-func (m *NamedStringArray) String() string { return proto.CompactTextString(m) }
-func (*NamedStringArray) ProtoMessage() {}
-func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+func (m *NamedStringArray) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NamedStringArray.Unmarshal(m, b)
+}
+func (m *NamedStringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NamedStringArray.Marshal(b, m, deterministic)
+}
+func (m *NamedStringArray) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NamedStringArray.Merge(m, src)
+}
+func (m *NamedStringArray) XXX_Size() int {
+	return xxx_messageInfo_NamedStringArray.Size(m)
+}
+func (m *NamedStringArray) XXX_DiscardUnknown() {
+	xxx_messageInfo_NamedStringArray.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_NamedStringArray proto.InternalMessageInfo
 
 func (m *NamedStringArray) GetName() string {
 	if m != nil {
@@ -1708,35 +2269,64 @@ type NonBodyParameter struct {
 	// *NonBodyParameter_FormDataParameterSubSchema
 	// *NonBodyParameter_QueryParameterSubSchema
 	// *NonBodyParameter_PathParameterSubSchema
-	Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"`
+	Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} }
+func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) }
+func (*NonBodyParameter) ProtoMessage() {}
+func (*NonBodyParameter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{30}
+}
+
+func (m *NonBodyParameter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_NonBodyParameter.Unmarshal(m, b)
+}
+func (m *NonBodyParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_NonBodyParameter.Marshal(b, m, deterministic)
+}
+func (m *NonBodyParameter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_NonBodyParameter.Merge(m, src)
+}
+func (m *NonBodyParameter) XXX_Size() int {
+	return xxx_messageInfo_NonBodyParameter.Size(m)
+}
+func (m *NonBodyParameter) XXX_DiscardUnknown() {
+	xxx_messageInfo_NonBodyParameter.DiscardUnknown(m)
 }
 
-func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} }
-func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) }
-func (*NonBodyParameter) ProtoMessage() {}
-func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+var xxx_messageInfo_NonBodyParameter proto.InternalMessageInfo
 
 type isNonBodyParameter_Oneof interface {
 	isNonBodyParameter_Oneof()
 }
 
 type NonBodyParameter_HeaderParameterSubSchema struct {
-	HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"`
+	HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"`
 }
+
 type NonBodyParameter_FormDataParameterSubSchema struct {
-	FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"`
+	FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"`
 }
+
 type NonBodyParameter_QueryParameterSubSchema struct {
-	QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"`
+	QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"`
 }
+
 type NonBodyParameter_PathParameterSubSchema struct {
-	PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"`
+	PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"`
 }
 
-func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {}
+func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {}
+
 func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {}
-func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {}
-func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {}
+
+func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {}
 
 func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof {
 	if m != nil {
@@ -1773,9 +2363,9 @@ func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema {
 	return nil
 }
 
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*NonBodyParameter) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
 		(*NonBodyParameter_HeaderParameterSubSchema)(nil),
 		(*NonBodyParameter_FormDataParameterSubSchema)(nil),
 		(*NonBodyParameter_QueryParameterSubSchema)(nil),
@@ -1783,122 +2373,43 @@ func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buff
 	}
 }
 
-func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*NonBodyParameter)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *NonBodyParameter_HeaderParameterSubSchema:
-		b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil {
-			return err
-		}
-	case *NonBodyParameter_FormDataParameterSubSchema:
-		b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil {
-			return err
-		}
-	case *NonBodyParameter_QueryParameterSubSchema:
-		b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil {
-			return err
-		}
-	case *NonBodyParameter_PathParameterSubSchema:
-		b.EncodeVarint(4<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*NonBodyParameter)
-	switch tag {
-	case 1: // oneof.header_parameter_sub_schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(HeaderParameterSubSchema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg}
-		return true, err
-	case 2: // oneof.form_data_parameter_sub_schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(FormDataParameterSubSchema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg}
-		return true, err
-	case 3: // oneof.query_parameter_sub_schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(QueryParameterSubSchema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg}
-		return true, err
-	case 4: // oneof.path_parameter_sub_schema
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(PathParameterSubSchema)
-		err := b.DecodeMessage(msg)
-		m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*NonBodyParameter)
-	// oneof
-	switch x := m.Oneof.(type) {
-	case *NonBodyParameter_HeaderParameterSubSchema:
-		s := proto.Size(x.HeaderParameterSubSchema)
-		n += proto.SizeVarint(1<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *NonBodyParameter_FormDataParameterSubSchema:
-		s := proto.Size(x.FormDataParameterSubSchema)
-		n += proto.SizeVarint(2<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *NonBodyParameter_QueryParameterSubSchema:
-		s := proto.Size(x.QueryParameterSubSchema)
-		n += proto.SizeVarint(3<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *NonBodyParameter_PathParameterSubSchema:
-		s := proto.Size(x.PathParameterSubSchema)
-		n += proto.SizeVarint(4<<3 | proto.WireBytes)
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
+type Oauth2AccessCodeSecurity struct {
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+	TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
-type Oauth2AccessCodeSecurity struct {
-	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
-	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
-	AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"`
-	TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"`
-	Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} }
+func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) }
+func (*Oauth2AccessCodeSecurity) ProtoMessage() {}
+func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{31}
 }
 
-func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} }
-func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) }
-func (*Oauth2AccessCodeSecurity) ProtoMessage() {}
-func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+func (m *Oauth2AccessCodeSecurity) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Oauth2AccessCodeSecurity.Unmarshal(m, b)
+}
+func (m *Oauth2AccessCodeSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Oauth2AccessCodeSecurity.Marshal(b, m, deterministic)
+}
+func (m *Oauth2AccessCodeSecurity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Oauth2AccessCodeSecurity.Merge(m, src)
+}
+func (m *Oauth2AccessCodeSecurity) XXX_Size() int {
+	return xxx_messageInfo_Oauth2AccessCodeSecurity.Size(m)
+}
+func (m *Oauth2AccessCodeSecurity) XXX_DiscardUnknown() {
+	xxx_messageInfo_Oauth2AccessCodeSecurity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Oauth2AccessCodeSecurity proto.InternalMessageInfo
 
 func (m *Oauth2AccessCodeSecurity) GetType() string {
 	if m != nil {
@@ -1950,18 +2461,41 @@ func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny {
 }
 
 type Oauth2ApplicationSecurity struct {
-	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
-	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
-	TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"`
-	Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
 }
 
-func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} }
-func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) }
-func (*Oauth2ApplicationSecurity) ProtoMessage() {}
-func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} }
+func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) }
+func (*Oauth2ApplicationSecurity) ProtoMessage() {}
+func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{32}
+}
+
+func (m *Oauth2ApplicationSecurity) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Oauth2ApplicationSecurity.Unmarshal(m, b)
+}
+func (m *Oauth2ApplicationSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Oauth2ApplicationSecurity.Marshal(b, m, deterministic)
+}
+func (m *Oauth2ApplicationSecurity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Oauth2ApplicationSecurity.Merge(m, src)
+}
+func (m *Oauth2ApplicationSecurity) XXX_Size() int {
+	return xxx_messageInfo_Oauth2ApplicationSecurity.Size(m)
+}
+func (m *Oauth2ApplicationSecurity) XXX_DiscardUnknown() {
+	xxx_messageInfo_Oauth2ApplicationSecurity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Oauth2ApplicationSecurity proto.InternalMessageInfo
 
 func (m *Oauth2ApplicationSecurity) GetType() string {
 	if m != nil {
@@ -2006,18 +2540,41 @@ func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny {
 }
 
 type Oauth2ImplicitSecurity struct {
-	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
-	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
-	AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"`
-	Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+	Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} }
+func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) }
+func (*Oauth2ImplicitSecurity) ProtoMessage() {}
+func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_336adc04ae589d92, []int{33}
+}
+
+func (m *Oauth2ImplicitSecurity) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Oauth2ImplicitSecurity.Unmarshal(m, b)
+}
+func (m *Oauth2ImplicitSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Oauth2ImplicitSecurity.Marshal(b, m, deterministic)
+}
+func (m *Oauth2ImplicitSecurity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Oauth2ImplicitSecurity.Merge(m, src)
+}
+func (m *Oauth2ImplicitSecurity) XXX_Size() int {
+	return xxx_messageInfo_Oauth2ImplicitSecurity.Size(m)
+}
+func (m *Oauth2ImplicitSecurity) XXX_DiscardUnknown() {
+	xxx_messageInfo_Oauth2ImplicitSecurity.DiscardUnknown(m)
 }
 
-func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} }
-func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) }
-func (*Oauth2ImplicitSecurity) ProtoMessage() {}
-func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+var xxx_messageInfo_Oauth2ImplicitSecurity proto.InternalMessageInfo
 
 func (m *Oauth2ImplicitSecurity) GetType() string {
 	if m != nil {
@@ -2062,18 +2619,41 @@ func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny {
 }
 
 type Oauth2PasswordSecurity struct {
-	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
-	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
-	TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"`
-	Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"`
-	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"`
+	Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized []byte `json:"-"`
+	XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} }
+func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) }
+func (*Oauth2PasswordSecurity) ProtoMessage() {}
+func (*Oauth2PasswordSecurity) 
Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{34} } -func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } -func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } -func (*Oauth2PasswordSecurity) ProtoMessage() {} -func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } +func (m *Oauth2PasswordSecurity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oauth2PasswordSecurity.Unmarshal(m, b) +} +func (m *Oauth2PasswordSecurity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Oauth2PasswordSecurity.Marshal(b, m, deterministic) +} +func (m *Oauth2PasswordSecurity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Oauth2PasswordSecurity.Merge(m, src) +} +func (m *Oauth2PasswordSecurity) XXX_Size() int { + return xxx_messageInfo_Oauth2PasswordSecurity.Size(m) +} +func (m *Oauth2PasswordSecurity) XXX_DiscardUnknown() { + xxx_messageInfo_Oauth2PasswordSecurity.DiscardUnknown(m) +} + +var xxx_messageInfo_Oauth2PasswordSecurity proto.InternalMessageInfo func (m *Oauth2PasswordSecurity) GetType() string { if m != nil { @@ -2118,13 +2698,36 @@ func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { } type Oauth2Scopes struct { - AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } -func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } -func (*Oauth2Scopes) ProtoMessage() {} -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } +func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } +func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } +func (*Oauth2Scopes) ProtoMessage() {} +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{35} +} + +func (m *Oauth2Scopes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Oauth2Scopes.Unmarshal(m, b) +} +func (m *Oauth2Scopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Oauth2Scopes.Marshal(b, m, deterministic) +} +func (m *Oauth2Scopes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Oauth2Scopes.Merge(m, src) +} +func (m *Oauth2Scopes) XXX_Size() int { + return xxx_messageInfo_Oauth2Scopes.Size(m) +} +func (m *Oauth2Scopes) XXX_DiscardUnknown() { + xxx_messageInfo_Oauth2Scopes.DiscardUnknown(m) +} + +var xxx_messageInfo_Oauth2Scopes proto.InternalMessageInfo func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { if m != nil { @@ -2134,32 +2737,55 @@ func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { } type Operation struct { - Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` // A brief summary of the operation. - Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` // A longer description of the operation, GitHub Flavored Markdown is allowed. 
- Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` // A unique identifier of the operation. - OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` + Produces []string `protobuf:"bytes,6,rep,name=produces,proto3" json:"produces,omitempty"` // A list of MIME types the API can consume. - Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` + Consumes []string `protobuf:"bytes,7,rep,name=consumes,proto3" json:"consumes,omitempty"` // The parameters needed to send a valid API call. - Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` - Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"` // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` - Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{36} +} + +func (m *Operation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Operation.Unmarshal(m, b) +} +func (m *Operation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Operation.Marshal(b, m, deterministic) +} +func (m *Operation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Operation.Merge(m, src) +} +func (m *Operation) XXX_Size() int { + return xxx_messageInfo_Operation.Size(m) +} +func (m *Operation) XXX_DiscardUnknown() { + xxx_messageInfo_Operation.DiscardUnknown(m) } -func (m *Operation) Reset() { *m = Operation{} } -func (m *Operation) String() string { return proto.CompactTextString(m) } -func (*Operation) ProtoMessage() {} 
-func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } +var xxx_messageInfo_Operation proto.InternalMessageInfo func (m *Operation) GetTags() []string { if m != nil { @@ -2256,26 +2882,51 @@ type Parameter struct { // Types that are valid to be assigned to Oneof: // *Parameter_BodyParameter // *Parameter_NonBodyParameter - Oneof isParameter_Oneof `protobuf_oneof:"oneof"` + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Parameter) Reset() { *m = Parameter{} } -func (m *Parameter) String() string { return proto.CompactTextString(m) } -func (*Parameter) ProtoMessage() {} -func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } +func (m *Parameter) Reset() { *m = Parameter{} } +func (m *Parameter) String() string { return proto.CompactTextString(m) } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{37} +} + +func (m *Parameter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Parameter.Unmarshal(m, b) +} +func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Parameter.Marshal(b, m, deterministic) +} +func (m *Parameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Parameter.Merge(m, src) +} +func (m *Parameter) XXX_Size() int { + return xxx_messageInfo_Parameter.Size(m) +} +func (m *Parameter) XXX_DiscardUnknown() { + xxx_messageInfo_Parameter.DiscardUnknown(m) +} + +var xxx_messageInfo_Parameter proto.InternalMessageInfo type isParameter_Oneof interface { isParameter_Oneof() } type Parameter_BodyParameter struct { - BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` } + type Parameter_NonBodyParameter struct { - NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` } -func (*Parameter_BodyParameter) isParameter_Oneof() {} +func (*Parameter_BodyParameter) isParameter_Oneof() {} + func (*Parameter_NonBodyParameter) isParameter_Oneof() {} func (m *Parameter) GetOneof() isParameter_Oneof { @@ -2299,89 +2950,46 @@ func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*Parameter) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*Parameter_BodyParameter)(nil), (*Parameter_NonBodyParameter)(nil), } } -func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Parameter) - // oneof - switch x := m.Oneof.(type) { - case *Parameter_BodyParameter: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BodyParameter); err != nil { - return err - } - case *Parameter_NonBodyParameter: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.NonBodyParameter); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) - } - return nil -} - -func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Parameter) - switch tag { - case 1: // oneof.body_parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BodyParameter) - err := b.DecodeMessage(msg) - m.Oneof = &Parameter_BodyParameter{msg} - return true, err - case 2: // oneof.non_body_parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(NonBodyParameter) - err := b.DecodeMessage(msg) - m.Oneof = &Parameter_NonBodyParameter{msg} - return true, err - default: - return false, nil - } -} - -func _Parameter_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Parameter) - // oneof - switch x := m.Oneof.(type) { - case *Parameter_BodyParameter: - s := proto.Size(x.BodyParameter) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *Parameter_NonBodyParameter: - s := proto.Size(x.NonBodyParameter) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - // One or more JSON representations for parameters type ParameterDefinitions struct { - AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } -func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } -func (*ParameterDefinitions) ProtoMessage() {} -func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } +func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } +func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } +func (*ParameterDefinitions) ProtoMessage() {} +func (*ParameterDefinitions) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{38} +} + +func (m *ParameterDefinitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ParameterDefinitions.Unmarshal(m, b) +} +func (m *ParameterDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ParameterDefinitions.Marshal(b, m, deterministic) +} +func (m *ParameterDefinitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParameterDefinitions.Merge(m, src) +} +func (m *ParameterDefinitions) XXX_Size() int { + return 
xxx_messageInfo_ParameterDefinitions.Size(m) +} +func (m *ParameterDefinitions) XXX_DiscardUnknown() { + xxx_messageInfo_ParameterDefinitions.DiscardUnknown(m) +} + +var xxx_messageInfo_ParameterDefinitions proto.InternalMessageInfo func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { if m != nil { @@ -2394,26 +3002,51 @@ type ParametersItem struct { // Types that are valid to be assigned to Oneof: // *ParametersItem_Parameter // *ParametersItem_JsonReference - Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ParametersItem) Reset() { *m = ParametersItem{} } +func (m *ParametersItem) String() string { return proto.CompactTextString(m) } +func (*ParametersItem) ProtoMessage() {} +func (*ParametersItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{39} +} + +func (m *ParametersItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ParametersItem.Unmarshal(m, b) +} +func (m *ParametersItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ParametersItem.Marshal(b, m, deterministic) +} +func (m *ParametersItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParametersItem.Merge(m, src) +} +func (m *ParametersItem) XXX_Size() int { + return xxx_messageInfo_ParametersItem.Size(m) +} +func (m *ParametersItem) XXX_DiscardUnknown() { + xxx_messageInfo_ParametersItem.DiscardUnknown(m) } -func (m *ParametersItem) Reset() { *m = ParametersItem{} } -func (m *ParametersItem) String() string { return proto.CompactTextString(m) } -func (*ParametersItem) ProtoMessage() {} -func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } +var xxx_messageInfo_ParametersItem proto.InternalMessageInfo type isParametersItem_Oneof interface { isParametersItem_Oneof() } type ParametersItem_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` } + type ParametersItem_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` } -func (*ParametersItem_Parameter) isParametersItem_Oneof() {} +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} + func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} func (m *ParametersItem) GetOneof() isParametersItem_Oneof { @@ -2437,98 +3070,55 @@ func (m *ParametersItem) GetJsonReference() *JsonReference { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*ParametersItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*ParametersItem_Parameter)(nil), (*ParametersItem_JsonReference)(nil), } } -func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ParametersItem) - // oneof - switch x := m.Oneof.(type) { - case *ParametersItem_Parameter: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Parameter); err != nil { - return err - } - case *ParametersItem_JsonReference: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.JsonReference); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ParametersItem) - switch tag { - case 1: // oneof.parameter - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Parameter) - err := b.DecodeMessage(msg) - m.Oneof = &ParametersItem_Parameter{msg} - return true, err - case 2: // oneof.json_reference - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(JsonReference) - err := b.DecodeMessage(msg) - m.Oneof = &ParametersItem_JsonReference{msg} - return true, err - default: - return false, nil - } -} - -func _ParametersItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ParametersItem) - // oneof - switch x := m.Oneof.(type) { - case *ParametersItem_Parameter: - s := proto.Size(x.Parameter) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ParametersItem_JsonReference: - s := proto.Size(x.JsonReference) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - type PathItem struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` - Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` - Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` - Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` - Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` - Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` - Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post,proto3" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete,proto3" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"` // The parameters needed to send a valid API call. 
- Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *PathItem) Reset() { *m = PathItem{} } -func (m *PathItem) String() string { return proto.CompactTextString(m) } -func (*PathItem) ProtoMessage() {} -func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } +func (m *PathItem) Reset() { *m = PathItem{} } +func (m *PathItem) String() string { return proto.CompactTextString(m) } +func (*PathItem) ProtoMessage() {} +func (*PathItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{40} +} + +func (m *PathItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PathItem.Unmarshal(m, b) +} +func (m *PathItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PathItem.Marshal(b, m, deterministic) +} +func (m *PathItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_PathItem.Merge(m, src) +} +func (m *PathItem) XXX_Size() int { + return xxx_messageInfo_PathItem.Size(m) +} +func (m *PathItem) XXX_DiscardUnknown() { + xxx_messageInfo_PathItem.DiscardUnknown(m) +} + +var xxx_messageInfo_PathItem proto.InternalMessageInfo func (m *PathItem) GetXRef() string { if m != nil { @@ -2602,37 +3192,60 @@ func (m *PathItem) GetVendorExtension() []*NamedAny { type PathParameterSubSchema struct { // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } -func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*PathParameterSubSchema) ProtoMessage() {} -func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 
`protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } +func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*PathParameterSubSchema) ProtoMessage() {} +func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{41} +} + +func (m *PathParameterSubSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PathParameterSubSchema.Unmarshal(m, b) +} +func (m *PathParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PathParameterSubSchema.Marshal(b, m, deterministic) +} +func (m *PathParameterSubSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_PathParameterSubSchema.Merge(m, src) +} +func (m *PathParameterSubSchema) XXX_Size() int { + return xxx_messageInfo_PathParameterSubSchema.Size(m) +} +func (m *PathParameterSubSchema) XXX_DiscardUnknown() { + xxx_messageInfo_PathParameterSubSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_PathParameterSubSchema proto.InternalMessageInfo func (m *PathParameterSubSchema) GetRequired() bool { if m != nil { @@ -2790,14 +3403,37 @@ func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { // Relative paths to the individual endpoints. They must be relative to the 'basePath'. 
type Paths struct { - VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` - Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Paths) Reset() { *m = Paths{} } +func (m *Paths) String() string { return proto.CompactTextString(m) } +func (*Paths) ProtoMessage() {} +func (*Paths) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{42} +} + +func (m *Paths) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Paths.Unmarshal(m, b) +} +func (m *Paths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Paths.Marshal(b, m, deterministic) +} +func (m *Paths) XXX_Merge(src proto.Message) { + xxx_messageInfo_Paths.Merge(m, src) +} +func (m *Paths) XXX_Size() int { + return xxx_messageInfo_Paths.Size(m) +} +func (m *Paths) XXX_DiscardUnknown() { + xxx_messageInfo_Paths.DiscardUnknown(m) } -func (m *Paths) Reset() { *m = Paths{} } -func (m *Paths) String() string { return proto.CompactTextString(m) } -func (*Paths) ProtoMessage() {} -func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } +var xxx_messageInfo_Paths proto.InternalMessageInfo func (m *Paths) GetVendorExtension() []*NamedAny { if m != nil { @@ -2814,30 +3450,53 @@ func (m *Paths) GetPath() []*NamedPathItem { } type PrimitivesItems struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - 
-func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } -func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } -func (*PrimitivesItems) ProtoMessage() {} -func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } +func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } +func (*PrimitivesItems) ProtoMessage() {} +func (*PrimitivesItems) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{43} +} + +func (m *PrimitivesItems) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PrimitivesItems.Unmarshal(m, b) +} +func (m *PrimitivesItems) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PrimitivesItems.Marshal(b, m, deterministic) +} +func (m *PrimitivesItems) XXX_Merge(src proto.Message) { + xxx_messageInfo_PrimitivesItems.Merge(m, src) +} +func (m *PrimitivesItems) XXX_Size() int { + return xxx_messageInfo_PrimitivesItems.Size(m) +} +func (m *PrimitivesItems) XXX_DiscardUnknown() { + xxx_messageInfo_PrimitivesItems.DiscardUnknown(m) +} + +var xxx_messageInfo_PrimitivesItems proto.InternalMessageInfo func (m *PrimitivesItems) GetType() string { if m != nil { @@ -2966,13 +3625,36 @@ func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { } type Properties struct { - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" 
json:"additional_properties,omitempty"` + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Properties) Reset() { *m = Properties{} } +func (m *Properties) String() string { return proto.CompactTextString(m) } +func (*Properties) ProtoMessage() {} +func (*Properties) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{44} +} + +func (m *Properties) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Properties.Unmarshal(m, b) +} +func (m *Properties) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Properties.Marshal(b, m, deterministic) +} +func (m *Properties) XXX_Merge(src proto.Message) { + xxx_messageInfo_Properties.Merge(m, src) +} +func (m *Properties) XXX_Size() int { + return xxx_messageInfo_Properties.Size(m) +} +func (m *Properties) XXX_DiscardUnknown() { + xxx_messageInfo_Properties.DiscardUnknown(m) } -func (m *Properties) Reset() { *m = Properties{} } -func (m *Properties) String() string { return proto.CompactTextString(m) } -func (*Properties) ProtoMessage() {} -func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } +var xxx_messageInfo_Properties proto.InternalMessageInfo func (m *Properties) GetAdditionalProperties() []*NamedSchema { if m != nil { @@ -2983,39 +3665,62 @@ func (m *Properties) GetAdditionalProperties() []*NamedSchema { type QueryParameterSubSchema struct { // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` // allows sending a parameter by name only or with an empty value. 
- AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } -func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } -func (*QueryParameterSubSchema) ProtoMessage() {} -func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string 
`protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } +func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*QueryParameterSubSchema) ProtoMessage() {} +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{45} +} + +func (m *QueryParameterSubSchema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryParameterSubSchema.Unmarshal(m, b) +} +func (m *QueryParameterSubSchema) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryParameterSubSchema.Marshal(b, m, deterministic) +} +func (m *QueryParameterSubSchema) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParameterSubSchema.Merge(m, src) +} +func (m *QueryParameterSubSchema) XXX_Size() int { + return xxx_messageInfo_QueryParameterSubSchema.Size(m) +} +func (m *QueryParameterSubSchema) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParameterSubSchema.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParameterSubSchema proto.InternalMessageInfo func (m *QueryParameterSubSchema) GetRequired() bool { if m != nil { @@ -3179,17 +3884,40 @@ func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { } type Response struct { - Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` - Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` - Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` - Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{46} +} + +func (m *Response) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_Response.Unmarshal(m, b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) +} +func (m *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(m, src) +} +func (m *Response) XXX_Size() int { + return xxx_messageInfo_Response.Size(m) +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) } -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } +var xxx_messageInfo_Response proto.InternalMessageInfo func (m *Response) GetDescription() string { if m != nil { @@ -3228,13 +3956,36 @@ func (m *Response) GetVendorExtension() []*NamedAny { // One or more JSON representations for parameters type ResponseDefinitions struct { - AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } -func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } -func (*ResponseDefinitions) ProtoMessage() {} -func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } +func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } +func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } +func (*ResponseDefinitions) ProtoMessage() {} +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{47} +} + +func (m *ResponseDefinitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResponseDefinitions.Unmarshal(m, b) +} +func (m *ResponseDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResponseDefinitions.Marshal(b, m, deterministic) +} +func (m *ResponseDefinitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseDefinitions.Merge(m, src) +} +func (m *ResponseDefinitions) XXX_Size() int { + return xxx_messageInfo_ResponseDefinitions.Size(m) +} +func (m *ResponseDefinitions) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseDefinitions.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseDefinitions proto.InternalMessageInfo func (m *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { if m != nil { @@ -3247,26 +3998,51 @@ type ResponseValue struct { // Types that are valid to be assigned to Oneof: // *ResponseValue_Response // *ResponseValue_JsonReference - Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ResponseValue) Reset() { *m = ResponseValue{} } -func (m *ResponseValue) String() string { return proto.CompactTextString(m) } -func (*ResponseValue) ProtoMessage() {} -func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } +func (m *ResponseValue) Reset() { *m = ResponseValue{} } +func (m *ResponseValue) 
String() string { return proto.CompactTextString(m) } +func (*ResponseValue) ProtoMessage() {} +func (*ResponseValue) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{48} +} + +func (m *ResponseValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ResponseValue.Unmarshal(m, b) +} +func (m *ResponseValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ResponseValue.Marshal(b, m, deterministic) +} +func (m *ResponseValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseValue.Merge(m, src) +} +func (m *ResponseValue) XXX_Size() int { + return xxx_messageInfo_ResponseValue.Size(m) +} +func (m *ResponseValue) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseValue proto.InternalMessageInfo type isResponseValue_Oneof interface { isResponseValue_Oneof() } type ResponseValue_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` + Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` } + type ResponseValue_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` } -func (*ResponseValue_Response) isResponseValue_Oneof() {} +func (*ResponseValue_Response) isResponseValue_Oneof() {} + func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} func (m *ResponseValue) GetOneof() isResponseValue_Oneof { @@ -3290,90 +4066,47 @@ func (m *ResponseValue) GetJsonReference() *JsonReference { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*ResponseValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*ResponseValue_Response)(nil), (*ResponseValue_JsonReference)(nil), } } -func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*ResponseValue) - // oneof - switch x := m.Oneof.(type) { - case *ResponseValue_Response: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Response); err != nil { - return err - } - case *ResponseValue_JsonReference: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.JsonReference); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) - } - return nil -} - -func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*ResponseValue) - switch tag { - case 1: // oneof.response - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Response) - err := b.DecodeMessage(msg) - m.Oneof = &ResponseValue_Response{msg} - return true, err - case 2: // oneof.json_reference - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(JsonReference) - err := b.DecodeMessage(msg) - m.Oneof = &ResponseValue_JsonReference{msg} - return true, err - default: - return false, nil - } -} - -func _ResponseValue_OneofSizer(msg proto.Message) (n int) { - m := msg.(*ResponseValue) - // oneof - switch x := m.Oneof.(type) { - case *ResponseValue_Response: - s := proto.Size(x.Response) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *ResponseValue_JsonReference: - s := proto.Size(x.JsonReference) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - // Response objects names can either be any valid HTTP status code or 'default'. 
type Responses struct { - ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Responses) Reset() { *m = Responses{} } +func (m *Responses) String() string { return proto.CompactTextString(m) } +func (*Responses) ProtoMessage() {} +func (*Responses) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{49} +} + +func (m *Responses) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Responses.Unmarshal(m, b) +} +func (m *Responses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Responses.Marshal(b, m, deterministic) +} +func (m *Responses) XXX_Merge(src proto.Message) { + xxx_messageInfo_Responses.Merge(m, src) +} +func (m *Responses) XXX_Size() int { + return xxx_messageInfo_Responses.Size(m) +} +func (m *Responses) XXX_DiscardUnknown() { + xxx_messageInfo_Responses.DiscardUnknown(m) } -func (m *Responses) Reset() { *m = Responses{} } -func (m *Responses) String() string { return proto.CompactTextString(m) } -func (*Responses) ProtoMessage() {} -func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } +var xxx_messageInfo_Responses proto.InternalMessageInfo func (m *Responses) GetResponseCode() []*NamedResponseValue { if m != nil { @@ -3391,43 +4124,66 @@ func (m *Responses) GetVendorExtension() []*NamedAny { // A deterministic version of a JSON Schema object. 
type Schema struct { - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` - Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` - MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` - Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` - MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` - MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` - Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` - AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` - Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` - Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` - AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` - Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` - Discriminator string `protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` - ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` - Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` -} - -func (m *Schema) Reset() { *m = Schema{} } -func (m *Schema) String() string { return proto.CompactTextString(m) } -func (*Schema) ProtoMessage() {} -func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` 
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required,proto3" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type,proto3" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items,proto3" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties,proto3" json:"properties,omitempty"` + Discriminator string `protobuf:"bytes,26,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml,proto3" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{50} +} + +func (m *Schema) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Schema.Unmarshal(m, b) +} +func (m *Schema) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { + return xxx_messageInfo_Schema.Marshal(b, m, deterministic) +} +func (m *Schema) XXX_Merge(src proto.Message) { + xxx_messageInfo_Schema.Merge(m, src) +} +func (m *Schema) XXX_Size() int { + return xxx_messageInfo_Schema.Size(m) +} +func (m *Schema) XXX_DiscardUnknown() { + xxx_messageInfo_Schema.DiscardUnknown(m) +} + +var xxx_messageInfo_Schema proto.InternalMessageInfo func (m *Schema) GetXRef() string { if m != nil { @@ -3650,26 +4406,51 @@ type SchemaItem struct { // Types that are valid to be assigned to Oneof: // *SchemaItem_Schema // *SchemaItem_FileSchema - Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *SchemaItem) Reset() { *m = SchemaItem{} } -func (m *SchemaItem) String() string { return proto.CompactTextString(m) } -func (*SchemaItem) ProtoMessage() {} -func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } +func (m *SchemaItem) Reset() { *m = SchemaItem{} } +func (m *SchemaItem) String() string { return proto.CompactTextString(m) } +func (*SchemaItem) ProtoMessage() {} +func (*SchemaItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{51} +} + +func (m *SchemaItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SchemaItem.Unmarshal(m, b) +} +func (m *SchemaItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SchemaItem.Marshal(b, m, deterministic) +} +func (m *SchemaItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchemaItem.Merge(m, src) +} +func (m *SchemaItem) XXX_Size() int { + return xxx_messageInfo_SchemaItem.Size(m) +} +func (m *SchemaItem) XXX_DiscardUnknown() { + xxx_messageInfo_SchemaItem.DiscardUnknown(m) +} + +var xxx_messageInfo_SchemaItem proto.InternalMessageInfo type isSchemaItem_Oneof interface { isSchemaItem_Oneof() } type SchemaItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` } + type SchemaItem_FileSchema struct { - FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` } -func (*SchemaItem_Schema) isSchemaItem_Oneof() {} +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} + func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { @@ -3693,88 +4474,45 @@ func (m *SchemaItem) GetFileSchema() *FileSchema { return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*SchemaItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*SchemaItem_Schema)(nil), (*SchemaItem_FileSchema)(nil), } } -func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*SchemaItem) - // oneof - switch x := m.Oneof.(type) { - case *SchemaItem_Schema: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Schema); err != nil { - return err - } - case *SchemaItem_FileSchema: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.FileSchema); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*SchemaItem) - switch tag { - case 1: // oneof.schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Schema) - err := b.DecodeMessage(msg) - m.Oneof = &SchemaItem_Schema{msg} - return true, err - case 2: // oneof.file_schema - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(FileSchema) - err := b.DecodeMessage(msg) - m.Oneof = &SchemaItem_FileSchema{msg} - return true, err - default: - return false, nil - } -} - -func _SchemaItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*SchemaItem) - // oneof - switch x := m.Oneof.(type) { - case *SchemaItem_Schema: - s := proto.Size(x.Schema) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SchemaItem_FileSchema: - s := proto.Size(x.FileSchema) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type SecurityDefinitions struct { + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -type SecurityDefinitions struct { - AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } +func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitions) ProtoMessage() {} +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{52} } -func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } -func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitions) ProtoMessage() {} -func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } +func (m *SecurityDefinitions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityDefinitions.Unmarshal(m, b) +} +func (m *SecurityDefinitions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityDefinitions.Marshal(b, m, deterministic) +} +func (m *SecurityDefinitions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityDefinitions.Merge(m, src) +} +func (m *SecurityDefinitions) XXX_Size() int { + return xxx_messageInfo_SecurityDefinitions.Size(m) +} +func (m *SecurityDefinitions) XXX_DiscardUnknown() { + 
xxx_messageInfo_SecurityDefinitions.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityDefinitions proto.InternalMessageInfo func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { if m != nil { @@ -3791,43 +4529,76 @@ type SecurityDefinitionsItem struct { // *SecurityDefinitionsItem_Oauth2PasswordSecurity // *SecurityDefinitionsItem_Oauth2ApplicationSecurity // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity - Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } +func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitionsItem) ProtoMessage() {} +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{53} } -func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } -func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } -func (*SecurityDefinitionsItem) ProtoMessage() {} -func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } +func (m *SecurityDefinitionsItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityDefinitionsItem.Unmarshal(m, b) +} +func (m *SecurityDefinitionsItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityDefinitionsItem.Marshal(b, m, deterministic) +} +func (m *SecurityDefinitionsItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityDefinitionsItem.Merge(m, src) +} +func (m *SecurityDefinitionsItem) XXX_Size() int { + return xxx_messageInfo_SecurityDefinitionsItem.Size(m) +} +func (m *SecurityDefinitionsItem) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityDefinitionsItem.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityDefinitionsItem proto.InternalMessageInfo type isSecurityDefinitionsItem_Oneof interface { isSecurityDefinitionsItem_Oneof() } type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { - BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` + BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,proto3,oneof"` } + type SecurityDefinitionsItem_ApiKeySecurity struct { - ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,proto3,oneof"` } + type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { - Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,proto3,oneof"` } + type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { - Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,proto3,oneof"` } + type 
SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { - Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,proto3,oneof"` } + type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { - Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,proto3,oneof"` } func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} -func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { if m != nil { @@ -3878,9 +4649,9 @@ func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCod return nil } -// XXX_OneofFuncs is for the internal use of the proto package. -func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*SecurityDefinitionsItem) XXX_OneofWrappers() []interface{} { + return []interface{}{ (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), (*SecurityDefinitionsItem_ApiKeySecurity)(nil), (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), @@ -3890,152 +4661,37 @@ func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *pro } } -func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*SecurityDefinitionsItem) - // oneof - switch x := m.Oneof.(type) { - case *SecurityDefinitionsItem_BasicAuthenticationSecurity: - b.EncodeVarint(1<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_ApiKeySecurity: - b.EncodeVarint(2<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: - b.EncodeVarint(3<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2PasswordSecurity: - b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: - b.EncodeVarint(5<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { - return err - } - case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - b.EncodeVarint(6<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { - return err - } - case nil: - default: - return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) - } - return nil -} - -func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*SecurityDefinitionsItem) - switch tag { - case 1: // oneof.basic_authentication_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(BasicAuthenticationSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} - return true, err - case 2: // oneof.api_key_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(ApiKeySecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} - return true, err - case 3: // oneof.oauth2_implicit_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2ImplicitSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} - return true, err - case 4: // oneof.oauth2_password_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2PasswordSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} - return true, err - case 5: // oneof.oauth2_application_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2ApplicationSecurity) - err := b.DecodeMessage(msg) - m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} - return true, err - case 6: // oneof.oauth2_access_code_security - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - msg := new(Oauth2AccessCodeSecurity) - err := b.DecodeMessage(msg) - m.Oneof = 
&SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} - return true, err - default: - return false, nil - } -} - -func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { - m := msg.(*SecurityDefinitionsItem) - // oneof - switch x := m.Oneof.(type) { - case *SecurityDefinitionsItem_BasicAuthenticationSecurity: - s := proto.Size(x.BasicAuthenticationSecurity) - n += proto.SizeVarint(1<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_ApiKeySecurity: - s := proto.Size(x.ApiKeySecurity) - n += proto.SizeVarint(2<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: - s := proto.Size(x.Oauth2ImplicitSecurity) - n += proto.SizeVarint(3<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2PasswordSecurity: - s := proto.Size(x.Oauth2PasswordSecurity) - n += proto.SizeVarint(4<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: - s := proto.Size(x.Oauth2ApplicationSecurity) - n += proto.SizeVarint(5<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: - s := proto.Size(x.Oauth2AccessCodeSecurity) - n += proto.SizeVarint(6<<3 | proto.WireBytes) - n += proto.SizeVarint(uint64(s)) - n += s - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n +type SecurityRequirement struct { + AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -type SecurityRequirement struct { - AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } +func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } +func (*SecurityRequirement) ProtoMessage() {} +func (*SecurityRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{54} } -func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } -func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } -func (*SecurityRequirement) ProtoMessage() {} -func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } +func (m *SecurityRequirement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SecurityRequirement.Unmarshal(m, b) +} +func (m *SecurityRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SecurityRequirement.Marshal(b, m, deterministic) +} +func (m *SecurityRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityRequirement.Merge(m, src) +} +func (m *SecurityRequirement) XXX_Size() int { + return xxx_messageInfo_SecurityRequirement.Size(m) +} +func (m *SecurityRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityRequirement proto.InternalMessageInfo func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { if m != nil { @@ -4045,13 +4701,36 @@ func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { } type 
StringArray struct { - Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringArray) Reset() { *m = StringArray{} } +func (m *StringArray) String() string { return proto.CompactTextString(m) } +func (*StringArray) ProtoMessage() {} +func (*StringArray) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{55} +} + +func (m *StringArray) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringArray.Unmarshal(m, b) +} +func (m *StringArray) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringArray.Marshal(b, m, deterministic) +} +func (m *StringArray) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringArray.Merge(m, src) +} +func (m *StringArray) XXX_Size() int { + return xxx_messageInfo_StringArray.Size(m) +} +func (m *StringArray) XXX_DiscardUnknown() { + xxx_messageInfo_StringArray.DiscardUnknown(m) } -func (m *StringArray) Reset() { *m = StringArray{} } -func (m *StringArray) String() string { return proto.CompactTextString(m) } -func (*StringArray) ProtoMessage() {} -func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } +var xxx_messageInfo_StringArray proto.InternalMessageInfo func (m *StringArray) GetValue() []string { if m != nil { @@ -4061,16 +4740,39 @@ func (m *StringArray) GetValue() []string { } type Tag struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Tag) Reset() { *m = Tag{} } -func (m *Tag) String() string { return proto.CompactTextString(m) } -func (*Tag) ProtoMessage() {} -func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{56} +} + +func (m *Tag) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Tag.Unmarshal(m, b) +} +func (m *Tag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Tag.Marshal(b, m, deterministic) +} +func (m *Tag) XXX_Merge(src proto.Message) { + xxx_messageInfo_Tag.Merge(m, src) +} +func (m *Tag) XXX_Size() int { + return xxx_messageInfo_Tag.Size(m) +} +func (m *Tag) XXX_DiscardUnknown() { + xxx_messageInfo_Tag.DiscardUnknown(m) +} + +var xxx_messageInfo_Tag proto.InternalMessageInfo func (m 
*Tag) GetName() string { if m != nil { @@ -4101,13 +4803,36 @@ func (m *Tag) GetVendorExtension() []*NamedAny { } type TypeItem struct { - Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TypeItem) Reset() { *m = TypeItem{} } +func (m *TypeItem) String() string { return proto.CompactTextString(m) } +func (*TypeItem) ProtoMessage() {} +func (*TypeItem) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{57} +} + +func (m *TypeItem) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TypeItem.Unmarshal(m, b) +} +func (m *TypeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TypeItem.Marshal(b, m, deterministic) +} +func (m *TypeItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeItem.Merge(m, src) +} +func (m *TypeItem) XXX_Size() int { + return xxx_messageInfo_TypeItem.Size(m) +} +func (m *TypeItem) XXX_DiscardUnknown() { + xxx_messageInfo_TypeItem.DiscardUnknown(m) } -func (m *TypeItem) Reset() { *m = TypeItem{} } -func (m *TypeItem) String() string { return proto.CompactTextString(m) } -func (*TypeItem) ProtoMessage() {} -func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } +var xxx_messageInfo_TypeItem proto.InternalMessageInfo func (m *TypeItem) GetValue() []string { if m != nil { @@ -4118,13 +4843,36 @@ func (m *TypeItem) GetValue() []string { // Any property starting with x- is valid. type VendorExtension struct { - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VendorExtension) Reset() { *m = VendorExtension{} } +func (m *VendorExtension) String() string { return proto.CompactTextString(m) } +func (*VendorExtension) ProtoMessage() {} +func (*VendorExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{58} } -func (m *VendorExtension) Reset() { *m = VendorExtension{} } -func (m *VendorExtension) String() string { return proto.CompactTextString(m) } -func (*VendorExtension) ProtoMessage() {} -func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } +func (m *VendorExtension) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VendorExtension.Unmarshal(m, b) +} +func (m *VendorExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VendorExtension.Marshal(b, m, deterministic) +} +func (m *VendorExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_VendorExtension.Merge(m, src) +} +func (m *VendorExtension) XXX_Size() int { + return xxx_messageInfo_VendorExtension.Size(m) +} +func (m *VendorExtension) XXX_DiscardUnknown() { + xxx_messageInfo_VendorExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_VendorExtension proto.InternalMessageInfo func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { if m != nil { @@ -4134,18 +4882,41 @@ func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { } type Xml struct { - Name string 
`protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Xml) Reset() { *m = Xml{} } +func (m *Xml) String() string { return proto.CompactTextString(m) } +func (*Xml) ProtoMessage() {} +func (*Xml) Descriptor() ([]byte, []int) { + return fileDescriptor_336adc04ae589d92, []int{59} +} + +func (m *Xml) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Xml.Unmarshal(m, b) +} +func (m *Xml) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Xml.Marshal(b, m, deterministic) +} +func (m *Xml) XXX_Merge(src proto.Message) { + xxx_messageInfo_Xml.Merge(m, src) +} +func (m *Xml) XXX_Size() int { + return xxx_messageInfo_Xml.Size(m) +} +func (m *Xml) XXX_DiscardUnknown() { + xxx_messageInfo_Xml.DiscardUnknown(m) } -func (m *Xml) Reset() { *m = Xml{} } -func (m *Xml) String() string { return proto.CompactTextString(m) } -func (*Xml) ProtoMessage() {} -func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } +var xxx_messageInfo_Xml proto.InternalMessageInfo func (m *Xml) GetName() string { if m != nil { @@ -4252,9 +5023,9 @@ func init() { proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") } -func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } +func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor_336adc04ae589d92) } -var fileDescriptor0 = []byte{ +var fileDescriptor_336adc04ae589d92 = []byte{ // 3129 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go index c954a2d9b..25affd063 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -17,13 +17,14 @@ package compiler import ( "errors" "fmt" - "gopkg.in/yaml.v2" "io/ioutil" "log" "net/http" "net/url" "path/filepath" "strings" + + yaml "gopkg.in/yaml.v2" ) var fileCache map[string][]byte @@ -31,6 +32,8 @@ var infoCache map[string]interface{} var count int64 var verboseReader = false +var fileCacheEnable = true +var infoCacheEnable = true func initializeFileCache() { if fileCache == nil { @@ -44,29 +47,67 @@ func 
initializeInfoCache() { } } +func DisableFileCache() { + fileCacheEnable = false +} + +func DisableInfoCache() { + infoCacheEnable = false +} + +func RemoveFromFileCache(fileurl string) { + if !fileCacheEnable { + return + } + initializeFileCache() + delete(fileCache, fileurl) +} + +func RemoveFromInfoCache(filename string) { + if !infoCacheEnable { + return + } + initializeInfoCache() + delete(infoCache, filename) +} + +func GetInfoCache() map[string]interface{} { + if infoCache == nil { + initializeInfoCache() + } + return infoCache +} + +func ClearInfoCache() { + infoCache = make(map[string]interface{}) +} + // FetchFile gets a specified file from the local filesystem or a remote location. func FetchFile(fileurl string) ([]byte, error) { + var bytes []byte initializeFileCache() - bytes, ok := fileCache[fileurl] - if ok { + if fileCacheEnable { + bytes, ok := fileCache[fileurl] + if ok { + if verboseReader { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } if verboseReader { - log.Printf("Cache hit %s", fileurl) + log.Printf("Fetching %s", fileurl) } - return bytes, nil - } - if verboseReader { - log.Printf("Fetching %s", fileurl) } response, err := http.Get(fileurl) if err != nil { return nil, err } + defer response.Body.Close() if response.StatusCode != 200 { return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) } - defer response.Body.Close() bytes, err = ioutil.ReadAll(response.Body) - if err == nil { + if fileCacheEnable && err == nil { fileCache[fileurl] = bytes } return bytes, err @@ -95,22 +136,24 @@ func ReadBytesForFile(filename string) ([]byte, error) { // ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { initializeInfoCache() - cachedInfo, ok := infoCache[filename] - if ok { + if infoCacheEnable { + cachedInfo, ok := infoCache[filename] + if ok { + if verboseReader { + log.Printf("Cache hit info for file %s", filename) + } + return cachedInfo, nil + } if verboseReader { - log.Printf("Cache hit info for file %s", filename) + log.Printf("Reading info for file %s", filename) } - return cachedInfo, nil - } - if verboseReader { - log.Printf("Reading info for file %s", filename) } var info yaml.MapSlice err := yaml.Unmarshal(bytes, &info) if err != nil { return nil, err } - if len(filename) > 0 { + if infoCacheEnable && len(filename) > 0 { infoCache[filename] = info } return info, nil @@ -119,7 +162,7 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { // ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. 
func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
 	initializeInfoCache()
-	{
+	if infoCacheEnable {
 		info, ok := infoCache[ref]
 		if ok {
 			if verboseReader {
@@ -127,16 +170,20 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
 			}
 			return info, nil
 		}
-	}
-	if verboseReader {
-		log.Printf("Reading info for ref %s#%s", basefile, ref)
+		if verboseReader {
+			log.Printf("Reading info for ref %s#%s", basefile, ref)
+		}
 	}
 	count = count + 1
 	basedir, _ := filepath.Split(basefile)
 	parts := strings.Split(ref, "#")
 	var filename string
 	if parts[0] != "" {
-		filename = basedir + parts[0]
+		filename = parts[0]
+		if _, err := url.ParseRequestURI(parts[0]); err != nil {
+			// It is not a URL, so the file is local
+			filename = basedir + parts[0]
+		}
 	} else {
 		filename = basefile
 	}
@@ -170,6 +217,8 @@ func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
 			}
 		}
 	}
-	infoCache[ref] = info
+	if infoCacheEnable {
+		infoCache[ref] = info
+	}
 	return info, nil
 }
diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
deleted file mode 100644
index 68d02a02a..000000000
--- a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-go get github.com/golang/protobuf/protoc-gen-go
-
-protoc \
---go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto
-
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
index 749ff7841..432dc06e6 100644
--- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
+++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
@@ -1,24 +1,14 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: extension.proto
+// source: extensions/extension.proto
 
-/*
-Package openapiextension_v1 is a generated protocol buffer package.
-
-It is generated from these files:
-	extension.proto
-
-It has these top-level messages:
-	Version
-	ExtensionHandlerRequest
-	ExtensionHandlerResponse
-	Wrapper
-*/
 package openapiextension_v1
 
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/golang/protobuf/ptypes/any"
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	any "github.com/golang/protobuf/ptypes/any"
+	math "math"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -29,22 +19,45 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
 
 // The version number of OpenAPI compiler.
 type Version struct {
-	Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
-	Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
-	Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
+	Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+	Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+	Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"`
 	// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2".
It should // be empty for mainline stable releases. - Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_661e47e790f76671, []int{0} +} + +func (m *Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (m *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(m, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) } -func (m *Version) Reset() { *m = Version{} } -func (m *Version) String() string { return proto.CompactTextString(m) } -func (*Version) ProtoMessage() {} -func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_Version proto.InternalMessageInfo func (m *Version) GetMajor() int32 { if m != nil { @@ -78,15 +91,38 @@ func (m *Version) GetSuffix() string { type ExtensionHandlerRequest struct { // The OpenAPI descriptions that were explicitly listed on the command line. // The specifications will appear in the order they are specified to gnostic. - Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` // The version number of openapi compiler. 
- CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } +func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerRequest) ProtoMessage() {} +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_661e47e790f76671, []int{1} +} + +func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b) +} +func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic) +} +func (m *ExtensionHandlerRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionHandlerRequest.Merge(m, src) +} +func (m *ExtensionHandlerRequest) XXX_Size() int { + return xxx_messageInfo_ExtensionHandlerRequest.Size(m) +} +func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m) } -func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } -func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerRequest) ProtoMessage() {} -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { if m != nil { @@ -105,7 +141,7 @@ func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { // The extensions writes an encoded ExtensionHandlerResponse to stdout. type ExtensionHandlerResponse struct { // true if the extension is handled by the extension handler; false otherwise - Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` + Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` // Error message. If non-empty, the extension handling failed. // The extension handler process should exit with status code zero // even if it reports an error in this way. @@ -115,15 +151,38 @@ type ExtensionHandlerResponse struct { // itself -- such as the input Document being unparseable -- should be // reported by writing a message to stderr and exiting with a non-zero // status code. 
- Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` + Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"` // text output - Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` + Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } -func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } -func (*ExtensionHandlerResponse) ProtoMessage() {} -func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } +func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerResponse) ProtoMessage() {} +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_661e47e790f76671, []int{2} +} + +func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b) +} +func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic) +} +func (m *ExtensionHandlerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionHandlerResponse.Merge(m, src) +} +func (m *ExtensionHandlerResponse) XXX_Size() int { + return xxx_messageInfo_ExtensionHandlerResponse.Size(m) +} +func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo func (m *ExtensionHandlerResponse) GetHandled() bool { if m != nil { @@ -139,7 +198,7 @@ func (m *ExtensionHandlerResponse) GetError() []string { return nil } -func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { +func (m *ExtensionHandlerResponse) GetValue() *any.Any { if m != nil { return m.Value } @@ -148,17 +207,40 @@ func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { type Wrapper struct { // version of the OpenAPI specification in which this extension was written. 
- Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` // Name of the extension - ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` // Must be a valid yaml for the proto - Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` + Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Wrapper) Reset() { *m = Wrapper{} } -func (m *Wrapper) String() string { return proto.CompactTextString(m) } -func (*Wrapper) ProtoMessage() {} -func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +func (m *Wrapper) Reset() { *m = Wrapper{} } +func (m *Wrapper) String() string { return proto.CompactTextString(m) } +func (*Wrapper) ProtoMessage() {} +func (*Wrapper) Descriptor() ([]byte, []int) { + return fileDescriptor_661e47e790f76671, []int{3} +} + +func (m *Wrapper) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Wrapper.Unmarshal(m, b) +} +func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic) +} +func (m *Wrapper) XXX_Merge(src proto.Message) { + xxx_messageInfo_Wrapper.Merge(m, src) +} +func (m *Wrapper) XXX_Size() int { + return xxx_messageInfo_Wrapper.Size(m) +} +func (m *Wrapper) XXX_DiscardUnknown() { + xxx_messageInfo_Wrapper.DiscardUnknown(m) +} + +var xxx_messageInfo_Wrapper proto.InternalMessageInfo func (m *Wrapper) GetVersion() string { if m != nil { @@ -188,31 +270,31 @@ func init() { proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") } -func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 357 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40, - 0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64, - 0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46, - 0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7, - 0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22, - 0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11, - 0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18, - 0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8, - 0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c, - 0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69, - 0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef, - 0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3, - 0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87, - 0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44, - 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 
0xd0, 0x4a, 0x16, 0x03, 0x80, - 0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4, - 0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54, - 0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c, - 0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e, - 0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2, - 0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa, - 0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b, - 0x80, 0x51, 0x02, 0x00, 0x00, +func init() { proto.RegisterFile("extensions/extension.proto", fileDescriptor_661e47e790f76671) } + +var fileDescriptor_661e47e790f76671 = []byte{ + // 362 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xeb, 0x40, + 0x18, 0x85, 0x49, 0xbf, 0x72, 0x33, 0x97, 0xdb, 0x2b, 0x63, 0xd1, 0x58, 0x5c, 0x94, 0x80, 0x50, + 0x44, 0xa6, 0x54, 0xc1, 0x7d, 0x0b, 0x45, 0xdd, 0xd8, 0x32, 0x8b, 0xba, 0xb3, 0x4c, 0xd3, 0xb7, + 0x69, 0x24, 0x99, 0x19, 0x27, 0x1f, 0xb6, 0x7f, 0xc5, 0xa5, 0xbf, 0x54, 0x32, 0x93, 0xc4, 0x85, + 0xba, 0x9b, 0xf3, 0x70, 0xda, 0xf7, 0x9c, 0x13, 0xd4, 0x87, 0x7d, 0x0a, 0x3c, 0x09, 0x05, 0x4f, + 0x46, 0xf5, 0x93, 0x48, 0x25, 0x52, 0x81, 0x8f, 0x85, 0x04, 0xce, 0x64, 0xf8, 0xc5, 0xf3, 0x71, + 0xff, 0x2c, 0x10, 0x22, 0x88, 0x60, 0xa4, 0x2d, 0xeb, 0x6c, 0x3b, 0x62, 0xfc, 0x60, 0xfc, 0x9e, + 0x8f, 0xec, 0x25, 0xa8, 0xc2, 0x88, 0x7b, 0xa8, 0x1d, 0xb3, 0x17, 0xa1, 0x5c, 0x6b, 0x60, 0x0d, + 0xdb, 0xd4, 0x08, 0x4d, 0x43, 0x2e, 0x94, 0xdb, 0x28, 0x69, 0x21, 0x0a, 0x2a, 0x59, 0xea, 0xef, + 0xdc, 0xa6, 0xa1, 0x5a, 0xe0, 0x13, 0xd4, 0x49, 0xb2, 0xed, 0x36, 0xdc, 0xbb, 0xad, 0x81, 0x35, + 0x74, 0x68, 0xa9, 0xbc, 0x77, 0x0b, 0x9d, 0xce, 0xaa, 0x40, 0xf7, 0x8c, 0x6f, 0x22, 0x50, 0x14, + 0x5e, 0x33, 0x48, 0x52, 0x7c, 0x8b, 0xec, 0x37, 0xc5, 0xa4, 0x04, 0x73, 0xf7, 0xef, 0xf5, 0x39, + 0xf9, 0xa1, 0x02, 0x79, 0x32, 0x1e, 0x5a, 0x99, 0xf1, 0x1d, 0x3a, 0xf2, 0x45, 0x2c, 0xc3, 0x08, + 0xd4, 0x2a, 0x37, 0x0d, 0x74, 0x98, 0xdf, 0xfe, 0xa0, 0x6c, 0x49, 0xff, 0x57, 0xbf, 0x2a, 0x81, + 0x97, 0x23, 0xf7, 0x7b, 0xb6, 0x44, 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd3, 0x68, 0xa3, 0xc3, + 0xfd, 0xa1, 0x95, 0x2c, 0x06, 0x00, 0xa5, 0xf4, 0x2c, 0xcd, 0xa1, 0x43, 0x8d, 0xc0, 0x97, 0xa8, + 0x9d, 0xb3, 0x28, 0x83, 0x32, 0x49, 0x8f, 0x98, 0xe1, 0x49, 0x35, 0x3c, 0x99, 0xf0, 0x03, 0x35, + 0x16, 0xef, 0x19, 0xd9, 0x65, 0xa9, 0xe2, 0x4c, 0x55, 0xc1, 0xd2, 0xc3, 0x55, 0x12, 0x5f, 0xa0, + 0x6e, 0xdd, 0x62, 0xc5, 0x59, 0x0c, 0xfa, 0x33, 0x38, 0xf4, 0x5f, 0x4d, 0x1f, 0x59, 0x0c, 0x18, + 0xa3, 0xd6, 0x81, 0xc5, 0x91, 0x3e, 0xeb, 0x50, 0xfd, 0x9e, 0x5e, 0xa1, 0xae, 0x50, 0x01, 0x09, + 0xb8, 0x48, 0xd2, 0xd0, 0x27, 0xf9, 0x78, 0x8a, 0xe7, 0x12, 0xf8, 0x64, 0xf1, 0x50, 0xd7, 0x5d, + 0x8e, 0x17, 0xd6, 0x47, 0xa3, 0x39, 0x9f, 0xcc, 0xd6, 0x1d, 0x1d, 0xf1, 0xe6, 0x33, 0x00, 0x00, + 0xff, 0xff, 0xeb, 0xf3, 0xfa, 0x65, 0x5c, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/kisielk/errcheck/.travis.yml b/vendor/github.com/kisielk/errcheck/.travis.yml new file mode 100644 index 000000000..5c9f68ad2 --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/.travis.yml @@ -0,0 +1,11 @@ +language: go +sudo: false + +matrix: + include: + - go: "1.9" + - go: "1.10" + - go: "tip" + +script: + - go test -race ./... 
diff --git a/vendor/github.com/kisielk/errcheck/LICENSE b/vendor/github.com/kisielk/errcheck/LICENSE
new file mode 100644
index 000000000..a2b16b5bd
--- /dev/null
+++ b/vendor/github.com/kisielk/errcheck/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2013 Kamil Kisiel
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/kisielk/errcheck/README.md b/vendor/github.com/kisielk/errcheck/README.md
new file mode 100644
index 000000000..55bbc4149
--- /dev/null
+++ b/vendor/github.com/kisielk/errcheck/README.md
@@ -0,0 +1,123 @@
+# errcheck
+
+errcheck is a program for checking for unchecked errors in Go programs.
+
+[![Build Status](https://travis-ci.org/kisielk/errcheck.png?branch=master)](https://travis-ci.org/kisielk/errcheck)
+
+## Install
+
+    go get -u github.com/kisielk/errcheck
+
+errcheck requires Go 1.9 or newer and depends on the package go/packages from the golang.org/x/tools repository.
+
+## Use
+
+For basic usage, just give the package path of interest as the first argument:
+
+    errcheck github.com/kisielk/errcheck/testdata
+
+To check all packages beneath the current directory:
+
+    errcheck ./...
+
+Or check all packages in your $GOPATH and $GOROOT:
+
+    errcheck all
+
+errcheck also recognizes the following command-line options:
+
+The `-tags` flag takes a space-separated list of build tags, just like `go
+build`. If you are using any custom build tags in your code base, you may need
+to specify the relevant tags here.
+
+The `-asserts` flag enables checking for ignored type assertion results. It
+takes no arguments.
+
+The `-blank` flag enables checking for assignments of errors to the
+blank identifier. It takes no arguments.
+
+
+## Excluding functions
+
+Use the `-exclude` flag to specify a path to a file containing a list of functions to
+be excluded.
+
+    errcheck -exclude errcheck_excludes.txt path/to/package
+
+The file should contain one function signature per line. The format for function signatures is
+`package.FunctionName` while for methods it's `(package.Receiver).MethodName` for value receivers
+and `(*package.Receiver).MethodName` for pointer receivers. If the function name is followed by
+a string of the form `(TYPE)`, then the function call is excluded only if the type of the first
+argument is `TYPE`. It also accepts the special suffixes `(os.Stdout)` and `(os.Stderr)`, which
+exclude the function only when the first argument is a literal `os.Stdout` or `os.Stderr`.
+ +An example of an exclude file is: + + io/ioutil.ReadFile + io.Copy(*bytes.Buffer) + io.Copy(os.Stdout) + (*net/http.Client).Do + +The exclude list is combined with an internal list for functions in the Go standard library that +have an error return type but are documented to never return an error. + + +### The deprecated method + +The `-ignore` flag takes a comma-separated list of pairs of the form package:regex. +For each package, the regex describes which functions to ignore within that package. +The package may be omitted to have the regex apply to all packages. + +For example, you may wish to ignore common operations like Read and Write: + + errcheck -ignore '[rR]ead|[wW]rite' path/to/package + +or you may wish to ignore common functions like the `print` variants in `fmt`: + + errcheck -ignore 'fmt:[FS]?[Pp]rint*' path/to/package + +The `-ignorepkg` flag takes a comma-separated list of package import paths +to ignore: + + errcheck -ignorepkg 'fmt,encoding/binary' path/to/package + +Note that this is equivalent to: + + errcheck -ignore 'fmt:.*,encoding/binary:.*' path/to/package + +If a regex is provided for a package `pkg` via `-ignore`, and `pkg` also appears +in the list of packages passed to `-ignorepkg`, the latter takes precedence; +that is, all functions within `pkg` will be ignored. + +Note that by default the `fmt` package is ignored entirely, unless a regex is +specified for it. To disable this, specify a regex that matches nothing: + + errcheck -ignore 'fmt:a^' path/to/package + +The `-ignoretests` flag disables checking of `_test.go` files. It takes +no arguments. + +## Cgo + +Currently errcheck is unable to check packages that import "C" due to limitations in the importer when used with versions earlier than Go 1.11. + +However, you can use errcheck on packages that depend on those which use cgo. In order for this to work you need to go install the cgo dependencies before running errcheck on the dependent packages. + +See https://github.com/kisielk/errcheck/issues/16 for more details. + +## Exit Codes + +errcheck returns 1 if any problems were found in the checked files. +It returns 2 if there were any other failures. + +# Editor Integration + +## Emacs + +[go-errcheck.el](https://github.com/dominikh/go-errcheck.el) +integrates errcheck with Emacs by providing a `go-errcheck` command +and customizable variables to automatically pass flags to errcheck. + +## Vim + +[vim-go](https://github.com/fatih/vim-go) can run errcheck via both its `:GoErrCheck` +and `:GoMetaLinter` commands. 
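To ground the README's description, here is a minimal, self-contained sketch (an editorial illustration, not part of the vendored sources; the file name and identifiers are made up) of the kinds of findings errcheck reports: unchecked error returns by default, blank-identifier assignments with `-blank`, and ignored type assertions with `-asserts`:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	f, _ := os.Create("out.txt") // reported with -blank: the error is assigned to _
	fmt.Fprintln(f, "hello")     // reported by default: Fprintln's error return is dropped
	f.Close()                    // reported by default: Close's error return is unchecked

	var v interface{} = "hi"
	_ = v.(string) // reported with -asserts: the assertion result is discarded
}
```

Running `errcheck -blank -asserts .` against such a file would report all four lines; with no flags, only the two dropped error returns are reported.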
diff --git a/vendor/github.com/kisielk/errcheck/go.mod b/vendor/github.com/kisielk/errcheck/go.mod new file mode 100644 index 000000000..e693836d4 --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/go.mod @@ -0,0 +1,3 @@ +module github.com/kisielk/errcheck + +require golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563 diff --git a/vendor/github.com/kisielk/errcheck/go.sum b/vendor/github.com/kisielk/errcheck/go.sum new file mode 100644 index 000000000..e7e69c992 --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/go.sum @@ -0,0 +1,4 @@ +golang.org/x/tools v0.0.0-20180803180156-3c07937fe18c h1:Z6Xcg33osoOLCiz/EFPHedK6zJ9nl1KK4WH/LlykMxs= +golang.org/x/tools v0.0.0-20180803180156-3c07937fe18c/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563 h1:NIou6eNFigscvKJmsbyez16S2cIS6idossORlFtSt2E= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/kisielk/errcheck/internal/errcheck/embedded_walker.go b/vendor/github.com/kisielk/errcheck/internal/errcheck/embedded_walker.go new file mode 100644 index 000000000..3b3192580 --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/internal/errcheck/embedded_walker.go @@ -0,0 +1,144 @@ +package errcheck + +import ( + "fmt" + "go/types" +) + +// walkThroughEmbeddedInterfaces returns a slice of Interfaces that +// we need to walk through in order to reach the actual definition, +// in an Interface, of the method selected by the given selection. +// +// false will be returned in the second return value if: +// - the right side of the selection is not a function +// - the actual definition of the function is not in an Interface +// +// The returned slice will contain all the interface types that need +// to be walked through to reach the actual definition. +// +// For example, say we have: +// +// type Inner interface {Method()} +// type Middle interface {Inner} +// type Outer interface {Middle} +// type T struct {Outer} +// type U struct {T} +// type V struct {U} +// +// And then the selector: +// +// V.Method +// +// We'll return [Outer, Middle, Inner] by first walking through the embedded structs +// until we reach the Outer interface, then descending through the embedded interfaces +// until we find the one that actually explicitly defines Method. +func walkThroughEmbeddedInterfaces(sel *types.Selection) ([]types.Type, bool) { + fn, ok := sel.Obj().(*types.Func) + if !ok { + return nil, false + } + + // Start off at the receiver. + currentT := sel.Recv() + + // First, we can walk through any Struct fields provided + // by the selection Index() method. We ignore the last + // index because it would give the method itself. + indexes := sel.Index() + for _, fieldIndex := range indexes[:len(indexes)-1] { + currentT = getTypeAtFieldIndex(currentT, fieldIndex) + } + + // Now currentT is either a type implementing the actual function, + // an Invalid type (if the receiver is a package), or an interface. + // + // If it's not an Interface, then we're done, as this function + // only cares about Interface-defined functions. + // + // If it is an Interface, we potentially need to continue digging until + // we find the Interface that actually explicitly defines the function. + interfaceT, ok := maybeUnname(currentT).(*types.Interface) + if !ok { + return nil, false + } + + // The first interface we pass through is this one we've found. 
We return the possibly + // wrapping types.Named because it is more useful to work with for callers. + result := []types.Type{currentT} + + // If this interface itself explicitly defines the given method + // then we're done digging. + for !explicitlyDefinesMethod(interfaceT, fn) { + // Otherwise, we find which of the embedded interfaces _does_ + // define the method, add it to our list, and loop. + namedInterfaceT, ok := getEmbeddedInterfaceDefiningMethod(interfaceT, fn) + if !ok { + // This should be impossible as long as we type-checked: either the + // interface or one of its embedded ones must implement the method... + panic(fmt.Sprintf("either %v or one of its embedded interfaces must implement %v", currentT, fn)) + } + result = append(result, namedInterfaceT) + interfaceT = namedInterfaceT.Underlying().(*types.Interface) + } + + return result, true +} + +func getTypeAtFieldIndex(startingAt types.Type, fieldIndex int) types.Type { + t := maybeUnname(maybeDereference(startingAt)) + s, ok := t.(*types.Struct) + if !ok { + panic(fmt.Sprintf("cannot get Field of a type that is not a struct, got a %T", t)) + } + + return s.Field(fieldIndex).Type() +} + +// getEmbeddedInterfaceDefiningMethod searches through any embedded interfaces of the +// passed interface searching for one that defines the given function. If found, the +// types.Named wrapping that interface will be returned along with true in the second value. +// +// If no such embedded interface is found, nil and false are returned. +func getEmbeddedInterfaceDefiningMethod(interfaceT *types.Interface, fn *types.Func) (*types.Named, bool) { + for i := 0; i < interfaceT.NumEmbeddeds(); i++ { + embedded := interfaceT.Embedded(i) + if definesMethod(embedded.Underlying().(*types.Interface), fn) { + return embedded, true + } + } + return nil, false +} + +func explicitlyDefinesMethod(interfaceT *types.Interface, fn *types.Func) bool { + for i := 0; i < interfaceT.NumExplicitMethods(); i++ { + if interfaceT.ExplicitMethod(i) == fn { + return true + } + } + return false +} + +func definesMethod(interfaceT *types.Interface, fn *types.Func) bool { + for i := 0; i < interfaceT.NumMethods(); i++ { + if interfaceT.Method(i) == fn { + return true + } + } + return false +} + +func maybeDereference(t types.Type) types.Type { + p, ok := t.(*types.Pointer) + if ok { + return p.Elem() + } + return t +} + +func maybeUnname(t types.Type) types.Type { + n, ok := t.(*types.Named) + if ok { + return n.Underlying() + } + return t +} diff --git a/vendor/github.com/kisielk/errcheck/internal/errcheck/errcheck.go b/vendor/github.com/kisielk/errcheck/internal/errcheck/errcheck.go new file mode 100644 index 000000000..a33500f07 --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/internal/errcheck/errcheck.go @@ -0,0 +1,639 @@ +// Package errcheck is the library used to implement the errcheck command-line tool. +// +// Note: The API of this package has not been finalized and may change at any point. +package errcheck + +import ( + "bufio" + "errors" + "fmt" + "go/ast" + "go/token" + "go/types" + "os" + "regexp" + "sort" + "strings" + "sync" + + "golang.org/x/tools/go/packages" +) + +var errorType *types.Interface + +func init() { + errorType = types.Universe.Lookup("error").Type().Underlying().(*types.Interface) + +} + +var ( + // ErrNoGoFiles is returned when CheckPackage is run on a package with no Go source files + ErrNoGoFiles = errors.New("package contains no go source files") +) + +// UncheckedError indicates the position of an unchecked error return. 
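+// Pos is the position of the offending call, Line the source line it appears
+// on, and FuncName the qualified name of the called function when it is known.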
+type UncheckedError struct { + Pos token.Position + Line string + FuncName string +} + +// UncheckedErrors is returned from the CheckPackage function if the package contains +// any unchecked errors. +// Errors should be appended using the Append method, which is safe to use concurrently. +type UncheckedErrors struct { + mu sync.Mutex + + // Errors is a list of all the unchecked errors in the package. + // Printing an error reports its position within the file and the contents of the line. + Errors []UncheckedError +} + +func (e *UncheckedErrors) Append(errors ...UncheckedError) { + e.mu.Lock() + defer e.mu.Unlock() + e.Errors = append(e.Errors, errors...) +} + +func (e *UncheckedErrors) Error() string { + return fmt.Sprintf("%d unchecked errors", len(e.Errors)) +} + +// Len is the number of elements in the collection. +func (e *UncheckedErrors) Len() int { return len(e.Errors) } + +// Swap swaps the elements with indexes i and j. +func (e *UncheckedErrors) Swap(i, j int) { e.Errors[i], e.Errors[j] = e.Errors[j], e.Errors[i] } + +type byName struct{ *UncheckedErrors } + +// Less reports whether the element with index i should sort before the element with index j. +func (e byName) Less(i, j int) bool { + ei, ej := e.Errors[i], e.Errors[j] + + pi, pj := ei.Pos, ej.Pos + + if pi.Filename != pj.Filename { + return pi.Filename < pj.Filename + } + if pi.Line != pj.Line { + return pi.Line < pj.Line + } + if pi.Column != pj.Column { + return pi.Column < pj.Column + } + + return ei.Line < ej.Line +} + +type Checker struct { + // ignore is a map of package names to regular expressions. Identifiers from a package are + // checked against its regular expressions and if any of the expressions match the call + // is not checked. + Ignore map[string]*regexp.Regexp + + // If blank is true then assignments to the blank identifier are also considered to be + // ignored errors. + Blank bool + + // If asserts is true then ignored type assertion results are also checked + Asserts bool + + // build tags + Tags []string + + Verbose bool + + // If true, checking of _test.go files is disabled + WithoutTests bool + + // If true, checking of files with generated code is disabled + WithoutGeneratedCode bool + + exclude map[string]bool +} + +func NewChecker() *Checker { + c := Checker{} + c.SetExclude(map[string]bool{}) + return &c +} + +func (c *Checker) SetExclude(l map[string]bool) { + c.exclude = map[string]bool{} + + // Default exclude for stdlib functions + for _, exc := range []string{ + // bytes + "(*bytes.Buffer).Write", + "(*bytes.Buffer).WriteByte", + "(*bytes.Buffer).WriteRune", + "(*bytes.Buffer).WriteString", + + // fmt + "fmt.Errorf", + "fmt.Print", + "fmt.Printf", + "fmt.Println", + "fmt.Fprint(*bytes.Buffer)", + "fmt.Fprintf(*bytes.Buffer)", + "fmt.Fprintln(*bytes.Buffer)", + "fmt.Fprint(*strings.Builder)", + "fmt.Fprintf(*strings.Builder)", + "fmt.Fprintln(*strings.Builder)", + "fmt.Fprint(os.Stderr)", + "fmt.Fprintf(os.Stderr)", + "fmt.Fprintln(os.Stderr)", + + // math/rand + "math/rand.Read", + "(*math/rand.Rand).Read", + + // strings + "(*strings.Builder).Write", + "(*strings.Builder).WriteByte", + "(*strings.Builder).WriteRune", + "(*strings.Builder).WriteString", + + // hash + "(hash.Hash).Write", + } { + c.exclude[exc] = true + } + + for k := range l { + c.exclude[k] = true + } +} + +func (c *Checker) logf(msg string, args ...interface{}) { + if c.Verbose { + fmt.Fprintf(os.Stderr, msg+"\n", args...) + } +} + +// loadPackages is used for testing. 
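+// It is declared as a variable so tests can swap in a stub loader.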
+var loadPackages = func(cfg *packages.Config, paths ...string) ([]*packages.Package, error) {
+	return packages.Load(cfg, paths...)
+}
+
+func (c *Checker) load(paths ...string) ([]*packages.Package, error) {
+	cfg := &packages.Config{
+		Mode:       packages.LoadAllSyntax,
+		Tests:      !c.WithoutTests,
+		BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(c.Tags, " "))},
+	}
+	return loadPackages(cfg, paths...)
+}
+
+var generatedCodeRegexp = regexp.MustCompile("^// Code generated .* DO NOT EDIT\\.$")
+
+func (c *Checker) shouldSkipFile(file *ast.File) bool {
+	if !c.WithoutGeneratedCode {
+		return false
+	}
+
+	for _, cg := range file.Comments {
+		for _, comment := range cg.List {
+			if generatedCodeRegexp.MatchString(comment.Text) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// CheckPackages checks packages for errors.
+func (c *Checker) CheckPackages(paths ...string) error {
+	pkgs, err := c.load(paths...)
+	if err != nil {
+		return err
+	}
+	// Check for errors in the initial packages.
+	for _, pkg := range pkgs {
+		if len(pkg.Errors) > 0 {
+			return fmt.Errorf("errors while loading package %s: %v", pkg.ID, pkg.Errors)
+		}
+	}
+
+	var wg sync.WaitGroup
+	u := &UncheckedErrors{}
+	for _, pkg := range pkgs {
+		wg.Add(1)
+
+		go func(pkg *packages.Package) {
+			defer wg.Done()
+			c.logf("Checking %s", pkg.Types.Path())
+
+			v := &visitor{
+				pkg:     pkg,
+				ignore:  c.Ignore,
+				blank:   c.Blank,
+				asserts: c.Asserts,
+				lines:   make(map[string][]string),
+				exclude: c.exclude,
+				errors:  []UncheckedError{},
+			}
+
+			for _, astFile := range v.pkg.Syntax {
+				if c.shouldSkipFile(astFile) {
+					continue
+				}
+				ast.Walk(v, astFile)
+			}
+			u.Append(v.errors...)
+		}(pkg)
+	}
+
+	wg.Wait()
+	if u.Len() > 0 {
+		// Sort unchecked errors and remove duplicates. Duplicates may occur when a file
+		// containing an unchecked error belongs to > 1 package.
+		sort.Sort(byName{u})
+		uniq := u.Errors[:0] // compact in-place
+		for i, err := range u.Errors {
+			if i == 0 || err != u.Errors[i-1] {
+				uniq = append(uniq, err)
+			}
+		}
+		u.Errors = uniq
+		return u
+	}
+	return nil
+}
+
+// visitor implements the errcheck algorithm
+type visitor struct {
+	pkg     *packages.Package
+	ignore  map[string]*regexp.Regexp
+	blank   bool
+	asserts bool
+	lines   map[string][]string
+	exclude map[string]bool
+
+	errors []UncheckedError
+}
+
+// selectorAndFunc tries to get the selector and function from a call expression.
+// For example, given the call expression representing "a.b()", the selector
+// is "a.b" and the function is "b" itself.
+//
+// The final return value will be true if it is able to extract a selector
+// from the call and look up the function object it refers to.
+//
+// If the call does not include a selector (like if it is a plain "f()" function call)
+// then the final return value will be false.
+func (v *visitor) selectorAndFunc(call *ast.CallExpr) (*ast.SelectorExpr, *types.Func, bool) {
+	sel, ok := call.Fun.(*ast.SelectorExpr)
+	if !ok {
+		return nil, nil, false
+	}
+
+	fn, ok := v.pkg.TypesInfo.ObjectOf(sel.Sel).(*types.Func)
+	if !ok {
+		// Shouldn't happen, but be paranoid
+		return nil, nil, false
+	}
+
+	return sel, fn, true
+}
+
+// fullName will return a package / receiver-type qualified name for a called function
+// if the function is the result of a selector. Otherwise it will return
+// the empty string.
+//
+// The name is fully qualified by the import path, possible type,
+// function/method name and pointer receiver.
+//
+// For example,
+// - for "fmt.Printf(...)" it will return "fmt.Printf"
+// - for "base64.StdEncoding.Decode(...)" it will return "(*encoding/base64.Encoding).Decode"
+// - for "myFunc()" it will return ""
+func (v *visitor) fullName(call *ast.CallExpr) string {
+	_, fn, ok := v.selectorAndFunc(call)
+	if !ok {
+		return ""
+	}
+
+	// TODO(dh): vendored packages will have /vendor/ in their name,
+	// thus not matching vendored standard library packages. If we
+	// want to support vendored stdlib packages, we need to implement
+	// FullName with our own logic.
+	return fn.FullName()
+}
+
+// namesForExcludeCheck will return a list of fully-qualified function names
+// from a function call that can be used to check against the exclusion list.
+//
+// If a function call is against a local function (like "myFunc()") then no
+// names are returned. If the function is package-qualified (like "fmt.Printf()")
+// then just that function's fullName is returned.
+//
+// Otherwise, we walk through all the potentially embedded interfaces of the receiver
+// to collect a list of type-qualified function names that we will check.
+func (v *visitor) namesForExcludeCheck(call *ast.CallExpr) []string {
+	sel, fn, ok := v.selectorAndFunc(call)
+	if !ok {
+		return nil
+	}
+
+	name := v.fullName(call)
+	if name == "" {
+		return nil
+	}
+
+	// This will be missing for functions without a receiver (like fmt.Printf),
+	// so just fall back to the function's fullName in that case.
+	selection, ok := v.pkg.TypesInfo.Selections[sel]
+	if !ok {
+		return []string{name}
+	}
+
+	// This will return with ok false if the function isn't defined
+	// on an interface, so just fall back to the fullName.
+	ts, ok := walkThroughEmbeddedInterfaces(selection)
+	if !ok {
+		return []string{name}
+	}
+
+	result := make([]string, len(ts))
+	for i, t := range ts {
+		// Like in fullName, vendored packages will have /vendor/ in their name,
+		// thus not matching vendored standard library packages. If we
+		// want to support vendored stdlib packages, we need to implement
+		// additional logic here.
+		result[i] = fmt.Sprintf("(%s).%s", t.String(), fn.Name())
+	}
+	return result
+}
+
+// argName returns a string representation of the type of the given argument
+// expression, special-casing the literals os.Stdout and os.Stderr so they can
+// be matched against exclusion entries.
+func (v *visitor) argName(expr ast.Expr) string {
+	// Special-case literal "os.Stdout" and "os.Stderr"
+	if sel, ok := expr.(*ast.SelectorExpr); ok {
+		if obj := v.pkg.TypesInfo.ObjectOf(sel.Sel); obj != nil {
+			vr, ok := obj.(*types.Var)
+			if ok && vr.Pkg() != nil && vr.Pkg().Name() == "os" && (vr.Name() == "Stderr" || vr.Name() == "Stdout") {
+				return "os." + vr.Name()
+			}
+		}
+	}
+	t := v.pkg.TypesInfo.TypeOf(expr)
+	if t == nil {
+		return ""
+	}
+	return t.String()
+}
+
+func (v *visitor) excludeCall(call *ast.CallExpr) bool {
+	var arg0 string
+	if len(call.Args) > 0 {
+		arg0 = v.argName(call.Args[0])
+	}
+	for _, name := range v.namesForExcludeCheck(call) {
+		if v.exclude[name] {
+			return true
+		}
+		if arg0 != "" && v.exclude[name+"("+arg0+")"] {
+			return true
+		}
+	}
+	return false
+}
+
+func (v *visitor) ignoreCall(call *ast.CallExpr) bool {
+	if v.excludeCall(call) {
+		return true
+	}
+
+	// Try to get an identifier.
+	// Currently only supports simple expressions:
+	// 1. f()
+	// 2.
x.y.f() + var id *ast.Ident + switch exp := call.Fun.(type) { + case (*ast.Ident): + id = exp + case (*ast.SelectorExpr): + id = exp.Sel + default: + // eg: *ast.SliceExpr, *ast.IndexExpr + } + + if id == nil { + return false + } + + // If we got an identifier for the function, see if it is ignored + if re, ok := v.ignore[""]; ok && re.MatchString(id.Name) { + return true + } + + if obj := v.pkg.TypesInfo.Uses[id]; obj != nil { + if pkg := obj.Pkg(); pkg != nil { + if re, ok := v.ignore[pkg.Path()]; ok { + return re.MatchString(id.Name) + } + + // if current package being considered is vendored, check to see if it should be ignored based + // on the unvendored path. + if nonVendoredPkg, ok := nonVendoredPkgPath(pkg.Path()); ok { + if re, ok := v.ignore[nonVendoredPkg]; ok { + return re.MatchString(id.Name) + } + } + } + } + + return false +} + +// nonVendoredPkgPath returns the unvendored version of the provided package path (or returns the provided path if it +// does not represent a vendored path). The second return value is true if the provided package was vendored, false +// otherwise. +func nonVendoredPkgPath(pkgPath string) (string, bool) { + lastVendorIndex := strings.LastIndex(pkgPath, "/vendor/") + if lastVendorIndex == -1 { + return pkgPath, false + } + return pkgPath[lastVendorIndex+len("/vendor/"):], true +} + +// errorsByArg returns a slice s such that +// len(s) == number of return types of call +// s[i] == true iff return type at position i from left is an error type +func (v *visitor) errorsByArg(call *ast.CallExpr) []bool { + switch t := v.pkg.TypesInfo.Types[call].Type.(type) { + case *types.Named: + // Single return + return []bool{isErrorType(t)} + case *types.Pointer: + // Single return via pointer + return []bool{isErrorType(t)} + case *types.Tuple: + // Multiple returns + s := make([]bool, t.Len()) + for i := 0; i < t.Len(); i++ { + switch et := t.At(i).Type().(type) { + case *types.Named: + // Single return + s[i] = isErrorType(et) + case *types.Pointer: + // Single return via pointer + s[i] = isErrorType(et) + default: + s[i] = false + } + } + return s + } + return []bool{false} +} + +func (v *visitor) callReturnsError(call *ast.CallExpr) bool { + if v.isRecover(call) { + return true + } + for _, isError := range v.errorsByArg(call) { + if isError { + return true + } + } + return false +} + +// isRecover returns true if the given CallExpr is a call to the built-in recover() function. +func (v *visitor) isRecover(call *ast.CallExpr) bool { + if fun, ok := call.Fun.(*ast.Ident); ok { + if _, ok := v.pkg.TypesInfo.Uses[fun].(*types.Builtin); ok { + return fun.Name == "recover" + } + } + return false +} + +func (v *visitor) addErrorAtPosition(position token.Pos, call *ast.CallExpr) { + pos := v.pkg.Fset.Position(position) + lines, ok := v.lines[pos.Filename] + if !ok { + lines = readfile(pos.Filename) + v.lines[pos.Filename] = lines + } + + line := "??" 
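+	// Replace the "??" placeholder with the actual source line when it is available.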
+	if pos.Line-1 < len(lines) {
+		line = strings.TrimSpace(lines[pos.Line-1])
+	}
+
+	var name string
+	if call != nil {
+		name = v.fullName(call)
+	}
+
+	v.errors = append(v.errors, UncheckedError{pos, line, name})
+}
+
+func readfile(filename string) []string {
+	var f, err = os.Open(filename)
+	if err != nil {
+		return nil
+	}
+	defer f.Close()
+
+	var lines []string
+	var scanner = bufio.NewScanner(f)
+	for scanner.Scan() {
+		lines = append(lines, scanner.Text())
+	}
+	return lines
+}
+
+func (v *visitor) Visit(node ast.Node) ast.Visitor {
+	switch stmt := node.(type) {
+	case *ast.ExprStmt:
+		if call, ok := stmt.X.(*ast.CallExpr); ok {
+			if !v.ignoreCall(call) && v.callReturnsError(call) {
+				v.addErrorAtPosition(call.Lparen, call)
+			}
+		}
+	case *ast.GoStmt:
+		if !v.ignoreCall(stmt.Call) && v.callReturnsError(stmt.Call) {
+			v.addErrorAtPosition(stmt.Call.Lparen, stmt.Call)
+		}
+	case *ast.DeferStmt:
+		if !v.ignoreCall(stmt.Call) && v.callReturnsError(stmt.Call) {
+			v.addErrorAtPosition(stmt.Call.Lparen, stmt.Call)
+		}
+	case *ast.AssignStmt:
+		if len(stmt.Rhs) == 1 {
+			// single value on rhs; check against lhs identifiers
+			if call, ok := stmt.Rhs[0].(*ast.CallExpr); ok {
+				if !v.blank {
+					break
+				}
+				if v.ignoreCall(call) {
+					break
+				}
+				isError := v.errorsByArg(call)
+				for i := 0; i < len(stmt.Lhs); i++ {
+					if id, ok := stmt.Lhs[i].(*ast.Ident); ok {
+						// We shortcut calls to recover() because errorsByArg can't
+						// check its return types for errors since it returns interface{}.
+						if id.Name == "_" && (v.isRecover(call) || isError[i]) {
+							v.addErrorAtPosition(id.NamePos, call)
+						}
+					}
+				}
+			} else if assert, ok := stmt.Rhs[0].(*ast.TypeAssertExpr); ok {
+				if !v.asserts {
+					break
+				}
+				if assert.Type == nil {
+					// type switch
+					break
+				}
+				if len(stmt.Lhs) < 2 {
+					// assertion result not read
+					v.addErrorAtPosition(stmt.Rhs[0].Pos(), nil)
+				} else if id, ok := stmt.Lhs[1].(*ast.Ident); ok && v.blank && id.Name == "_" {
+					// assertion result ignored
+					v.addErrorAtPosition(id.NamePos, nil)
+				}
+			}
+		} else {
+			// multiple value on rhs; in this case a call can't return
+			// multiple values.
Assume len(stmt.Lhs) == len(stmt.Rhs) + for i := 0; i < len(stmt.Lhs); i++ { + if id, ok := stmt.Lhs[i].(*ast.Ident); ok { + if call, ok := stmt.Rhs[i].(*ast.CallExpr); ok { + if !v.blank { + continue + } + if v.ignoreCall(call) { + continue + } + if id.Name == "_" && v.callReturnsError(call) { + v.addErrorAtPosition(id.NamePos, call) + } + } else if assert, ok := stmt.Rhs[i].(*ast.TypeAssertExpr); ok { + if !v.asserts { + continue + } + if assert.Type == nil { + // Shouldn't happen anyway, no multi assignment in type switches + continue + } + v.addErrorAtPosition(id.NamePos, nil) + } + } + } + } + default: + } + return v +} + +func isErrorType(t types.Type) bool { + return types.Implements(t, errorType) +} diff --git a/vendor/github.com/kisielk/errcheck/main.go b/vendor/github.com/kisielk/errcheck/main.go new file mode 100644 index 000000000..7481c9e14 --- /dev/null +++ b/vendor/github.com/kisielk/errcheck/main.go @@ -0,0 +1,194 @@ +package main + +import ( + "bufio" + "flag" + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + + "github.com/kisielk/errcheck/internal/errcheck" +) + +const ( + exitCodeOk int = iota + exitUncheckedError + exitFatalError +) + +var abspath bool + +type ignoreFlag map[string]*regexp.Regexp + +func (f ignoreFlag) String() string { + pairs := make([]string, 0, len(f)) + for pkg, re := range f { + prefix := "" + if pkg != "" { + prefix = pkg + ":" + } + pairs = append(pairs, prefix+re.String()) + } + return fmt.Sprintf("%q", strings.Join(pairs, ",")) +} + +func (f ignoreFlag) Set(s string) error { + if s == "" { + return nil + } + for _, pair := range strings.Split(s, ",") { + colonIndex := strings.Index(pair, ":") + var pkg, re string + if colonIndex == -1 { + pkg = "" + re = pair + } else { + pkg = pair[:colonIndex] + re = pair[colonIndex+1:] + } + regex, err := regexp.Compile(re) + if err != nil { + return err + } + f[pkg] = regex + } + return nil +} + +type tagsFlag []string + +func (f *tagsFlag) String() string { + return fmt.Sprintf("%q", strings.Join(*f, " ")) +} + +func (f *tagsFlag) Set(s string) error { + if s == "" { + return nil + } + tags := strings.Split(s, " ") + if tags == nil { + return nil + } + for _, tag := range tags { + if tag != "" { + *f = append(*f, tag) + } + } + return nil +} + +var dotStar = regexp.MustCompile(".*") + +func reportUncheckedErrors(e *errcheck.UncheckedErrors, verbose bool) { + wd, err := os.Getwd() + if err != nil { + wd = "" + } + for _, uncheckedError := range e.Errors { + pos := uncheckedError.Pos.String() + if !abspath { + newPos, err := filepath.Rel(wd, pos) + if err == nil { + pos = newPos + } + } + + if verbose && uncheckedError.FuncName != "" { + fmt.Printf("%s:\t%s\t%s\n", pos, uncheckedError.FuncName, uncheckedError.Line) + } else { + fmt.Printf("%s:\t%s\n", pos, uncheckedError.Line) + } + } +} + +func mainCmd(args []string) int { + runtime.GOMAXPROCS(runtime.NumCPU()) + + checker := errcheck.NewChecker() + paths, err := parseFlags(checker, args) + if err != exitCodeOk { + return err + } + + if err := checker.CheckPackages(paths...); err != nil { + if e, ok := err.(*errcheck.UncheckedErrors); ok { + reportUncheckedErrors(e, checker.Verbose) + return exitUncheckedError + } else if err == errcheck.ErrNoGoFiles { + fmt.Fprintln(os.Stderr, err) + return exitCodeOk + } + fmt.Fprintf(os.Stderr, "error: failed to check packages: %s\n", err) + return exitFatalError + } + return exitCodeOk +} + +func parseFlags(checker *errcheck.Checker, args []string) ([]string, int) { + flags := 
flag.NewFlagSet(args[0], flag.ContinueOnError) + flags.BoolVar(&checker.Blank, "blank", false, "if true, check for errors assigned to blank identifier") + flags.BoolVar(&checker.Asserts, "asserts", false, "if true, check for ignored type assertion results") + flags.BoolVar(&checker.WithoutTests, "ignoretests", false, "if true, checking of _test.go files is disabled") + flags.BoolVar(&checker.WithoutGeneratedCode, "ignoregenerated", false, "if true, checking of files with generated code is disabled") + flags.BoolVar(&checker.Verbose, "verbose", false, "produce more verbose logging") + + flags.BoolVar(&abspath, "abspath", false, "print absolute paths to files") + + tags := tagsFlag{} + flags.Var(&tags, "tags", "space-separated list of build tags to include") + ignorePkg := flags.String("ignorepkg", "", "comma-separated list of package paths to ignore") + ignore := ignoreFlag(map[string]*regexp.Regexp{}) + flags.Var(ignore, "ignore", "[deprecated] comma-separated list of pairs of the form pkg:regex\n"+ + " the regex is used to ignore names within pkg.") + + var excludeFile string + flags.StringVar(&excludeFile, "exclude", "", "Path to a file containing a list of functions to exclude from checking") + + if err := flags.Parse(args[1:]); err != nil { + return nil, exitFatalError + } + + if excludeFile != "" { + exclude := make(map[string]bool) + fh, err := os.Open(excludeFile) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not read exclude file: %s\n", err) + return nil, exitFatalError + } + scanner := bufio.NewScanner(fh) + for scanner.Scan() { + name := scanner.Text() + exclude[name] = true + + if checker.Verbose { + fmt.Printf("Excluding %s\n", name) + } + } + if err := scanner.Err(); err != nil { + fmt.Fprintf(os.Stderr, "Could not read exclude file: %s\n", err) + return nil, exitFatalError + } + checker.SetExclude(exclude) + } + + checker.Tags = tags + for _, pkg := range strings.Split(*ignorePkg, ",") { + if pkg != "" { + ignore[pkg] = dotStar + } + } + checker.Ignore = ignore + + paths := flags.Args() + if len(paths) == 0 { + paths = []string{"."} + } + return paths, exitCodeOk +} + +func main() { + os.Exit(mainCmd(os.Args)) +} diff --git a/vendor/github.com/mattn/go-colorable/.travis.yml b/vendor/github.com/mattn/go-colorable/.travis.yml new file mode 100644 index 000000000..98db8f060 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/.travis.yml @@ -0,0 +1,9 @@ +language: go +go: + - tip + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken xnXqRGwgW3SXIguzxf90ZSK1GPYZPaGrw diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 000000000..91b5cef30 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-colorable/README.md b/vendor/github.com/mattn/go-colorable/README.md
new file mode 100644
index 000000000..56729a92c
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/README.md
@@ -0,0 +1,48 @@
+# go-colorable
+
+[![Godoc Reference](https://godoc.org/github.com/mattn/go-colorable?status.svg)](http://godoc.org/github.com/mattn/go-colorable)
+[![Build Status](https://travis-ci.org/mattn/go-colorable.svg?branch=master)](https://travis-ci.org/mattn/go-colorable)
+[![Coverage Status](https://coveralls.io/repos/github/mattn/go-colorable/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-colorable?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/mattn/go-colorable)](https://goreportcard.com/report/mattn/go-colorable)
+
+Colorable writer for Windows.
+
+For example, most logger packages don't show colors on Windows. (Tools such as ansicon can work around this, but they require an external program.)
+This package makes it possible to handle ANSI color escape sequences on Windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
+
+## Usage
+
+```go
+logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
+logrus.SetOutput(colorable.NewColorableStdout())
+
+logrus.Info("succeeded")
+logrus.Warn("not correct")
+logrus.Error("something error")
+logrus.Fatal("panic")
+```
+
+The code above also compiles on non-Windows OSes.
+
+## Installation
+
+```
+$ go get github.com/mattn/go-colorable
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/vendor/github.com/mattn/go-colorable/colorable_appengine.go b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
new file mode 100644
index 000000000..1f28d773d
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_appengine.go
@@ -0,0 +1,29 @@
+// +build appengine
+
+package colorable
+
+import (
+	"io"
+	"os"
+
+	_ "github.com/mattn/go-isatty"
+)
+
+// NewColorable return new instance of Writer which handle escape sequence.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	return file
+}
+
+// NewColorableStdout return new instance of Writer which handle escape sequence for stdout.
+func NewColorableStdout() io.Writer {
+	return os.Stdout
+}
+
+// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
+func NewColorableStderr() io.Writer {
+	return os.Stderr
+}
diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go
new file mode 100644
index 000000000..887f203dc
--- /dev/null
+++ b/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -0,0 +1,30 @@
+// +build !windows
+// +build !appengine
+
+package colorable
+
+import (
+	"io"
+	"os"
+
+	_ "github.com/mattn/go-isatty"
+)
+
+// NewColorable return new instance of Writer which handle escape sequence.
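+// On non-Windows platforms the file is returned as-is, since those terminals
+// interpret ANSI escape sequences natively.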
+func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 000000000..404e10ca0 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,980 @@ +// +build windows +// +build !appengine + +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +const ( + genericRead = 0x80000000 + genericWrite = 0x40000000 +) + +const ( + consoleTextmodeBuffer = 0x1 +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") + procSetConsoleTitle = kernel32.NewProc("SetConsoleTitleW") + procCreateConsoleScreenBuffer = kernel32.NewProc("CreateConsoleScreenBuffer") +) + +// Writer provide colorable Writer to the console +type Writer struct { + out io.Writer + handle syscall.Handle + althandle syscall.Handle + oldattr word + oldpos coord + rest bytes.Buffer +} + +// NewColorable return new instance of Writer which handle escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. 
+func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 
199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// `\033]0;TITLESTR\007` +func doTitleSequence(er *bytes.Reader) error { + var c byte + var err error + + c, err = er.ReadByte() + if err != nil { + return err + } + if c != '0' && c != '2' { + return nil + } + c, err = er.ReadByte() + if err != nil { + return err + } + if c != ';' { + return nil + } + title := make([]byte, 0, 80) + for { + c, err = er.ReadByte() + if err != nil { + return err + } + if c == 0x07 || c == '\n' { + break + } + title = append(title, c) + } + if len(title) > 0 { + title8, err := syscall.UTF16PtrFromString(string(title)) + if err == nil { + procSetConsoleTitle.Call(uintptr(unsafe.Pointer(title8))) + } + } + return nil +} + +// Write write data on console +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + handle := w.handle + + var er *bytes.Reader + if w.rest.Len() > 0 { + var rest bytes.Buffer + w.rest.WriteTo(&rest) + w.rest.Reset() + rest.Write(data) + er = bytes.NewReader(rest.Bytes()) + } else { + er = bytes.NewReader(data) + } + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + + switch c2 { + case '>': + continue + case ']': + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + if bytes.IndexByte(w.rest.Bytes(), 0x07) == -1 { + break loop + } + er = bytes.NewReader(w.rest.Bytes()[2:]) + err := doTitleSequence(er) + if err != nil { + break loop + } + w.rest.Reset() + continue + // https://github.com/mattn/go-colorable/issues/27 + case '7': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + continue + case '8': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + continue + case 0x5b: + // execute part after switch + default: + continue + } + + w.rest.WriteByte(c1) + w.rest.WriteByte(c2) + er.WriteTo(&w.rest) + + var buf bytes.Buffer + var m byte + for i, c := range w.rest.Bytes()[2:] { + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + er = bytes.NewReader(w.rest.Bytes()[2+i+1:]) + w.rest.Reset() + break + } + buf.Write([]byte(string(c))) + } + if m == 0 { + break loop + } + + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), 
uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + if csbi.cursorPosition.x < 0 { + csbi.cursorPosition.x = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H', 'f': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + if buf.Len() > 0 { + token := strings.Split(buf.String(), ";") + switch len(token) { + case 1: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + csbi.cursorPosition.y = short(n1 - 1) + case 2: + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + } + } else { + csbi.cursorPosition.y = 0 + } + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + var count, written dword + var cursor coord + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.window.top-csbi.cursorPosition.y)*dword(csbi.size.x) + case 2: + cursor = coord{x: csbi.window.left, y: 
csbi.window.top} + count = dword(csbi.size.x) - dword(csbi.cursorPosition.x) + dword(csbi.size.y-csbi.cursorPosition.y)*dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n := 0 + if buf.Len() > 0 { + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + } + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + var count, written dword + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 1: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x - csbi.cursorPosition.x) + case 2: + cursor = coord{x: csbi.window.left, y: csbi.cursorPosition.y} + count = dword(csbi.size.x) + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case n == 22 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= foregroundRed + } + if g > 127 { + attr |= foregroundGreen + } + if b > 127 { + attr |= foregroundBlue + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. 
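+				// Like the n == 38 branch above: "5;N" selects an xterm 256-color
+				// palette entry and "2;R;G;B" a 24-bit color; both are mapped onto
+				// the console's background attribute bits.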
+ if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else if len(token) == 5 && token[i+1] == "2" { + var r, g, b int + r, _ = strconv.Atoi(token[i+2]) + g, _ = strconv.Atoi(token[i+3]) + b, _ = strconv.Atoi(token[i+4]) + i += 4 + if r > 127 { + attr |= backgroundRed + } + if g > 127 { + attr |= backgroundGreen + } + if b > 127 { + attr |= backgroundBlue + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset background color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(handle), uintptr(attr)) + } + } + case 'h': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle == 0 { + h, _, _ := procCreateConsoleScreenBuffer.Call(uintptr(genericRead|genericWrite), 0, 0, uintptr(consoleTextmodeBuffer), 0, 0) + w.althandle = syscall.Handle(h) + if w.althandle != 0 { + handle = w.althandle + } + } + } + case 'l': + var ci consoleCursorInfo + cs := buf.String() + if cs == "5>" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?25" { + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&ci))) + } else if cs == "?1049" { + if w.althandle != 0 { + syscall.CloseHandle(w.althandle) + w.althandle = 0 + handle = w.handle + } + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + + return len(data), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + {0x000000, false, false, false, false}, + {0x000080, false, false, true, false},
+ {0x008000, false, true, false, false}, + {0x008080, false, true, true, false}, + {0x800000, true, false, false, false}, + {0x800080, true, false, true, false}, + {0x808000, true, true, false, false}, + {0xc0c0c0, true, true, true, false}, + {0x808080, false, false, false, true}, + {0x0000ff, false, false, true, true}, + {0x00ff00, false, true, false, true}, + {0x00ffff, false, true, true, true}, + {0xff0000, true, false, false, true}, + {0xff00ff, true, false, true, true}, + {0xffff00, true, true, false, true}, + {0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/mattn/go-colorable/go.mod b/vendor/github.com/mattn/go-colorable/go.mod new file mode 100644 index 000000000..ef3ca9d4c --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-colorable + +require github.com/mattn/go-isatty v0.0.8 diff --git a/vendor/github.com/mattn/go-colorable/go.sum b/vendor/github.com/mattn/go-colorable/go.sum new file mode 100644 index 000000000..2c12960ec --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/go.sum @@ -0,0 +1,4 @@ +github.com/mattn/go-isatty v0.0.5 h1:tHXDdz1cpzGaovsTB+TVB8q90WEokoVmfMqoVcrLUgw= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 000000000..9721e16f4 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,55 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable holds a writer and removes escape sequences from everything written to it.
+type NonColorable struct { + out io.Writer +} + +// NewNonColorable returns a new instance of Writer which removes escape sequences from the given Writer. +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write writes data to the underlying writer, stripping escape sequences. +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + break loop + } + if c2 != 0x5b { + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + + return len(data), nil +} diff --git a/vendor/github.com/mattn/go-isatty/.travis.yml b/vendor/github.com/mattn/go-isatty/.travis.yml new file mode 100644 index 000000000..5597e026d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - tip + +os: + - linux + - osx + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -repotoken 3gHdORO5k5ziZcWMBxnd9LrMZaJs8m9x5 diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 000000000..65dc692b6 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
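For a quick sanity check of the stripping behavior in the vendored noncolorable.go above, here is a minimal sketch (the buffer and example strings are illustrative; the import path matches the vendored module):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	// Writes routed through NewNonColorable reach the underlying
	// writer with CSI escape sequences (e.g. SGR color codes) removed.
	var buf bytes.Buffer
	w := colorable.NewNonColorable(&buf)
	fmt.Fprint(w, "\x1b[31mred\x1b[0m plain")
	fmt.Println(buf.String()) // Output: red plain
}
```

This mirrors what the Write implementation does: an ESC followed by '[' opens a sequence that is buffered and discarded up to its terminating letter, while all other bytes pass through unchanged.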
diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 000000000..1e69004bb --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[![Godoc Reference](https://godoc.org/github.com/mattn/go-isatty?status.svg)](http://godoc.org/github.com/mattn/go-isatty) +[![Build Status](https://travis-ci.org/mattn/go-isatty.svg?branch=master)](https://travis-ci.org/mattn/go-isatty) +[![Coverage Status](https://coveralls.io/repos/github/mattn/go-isatty/badge.svg?branch=master)](https://coveralls.io/github/mattn/go-isatty?branch=master) +[![Go Report Card](https://goreportcard.com/badge/mattn/go-isatty)](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 000000000..17d4f90eb --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements an interface to isatty. +package isatty diff --git a/vendor/github.com/mattn/go-isatty/go.mod b/vendor/github.com/mattn/go-isatty/go.mod new file mode 100644 index 000000000..f310320c3 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.mod @@ -0,0 +1,3 @@ +module github.com/mattn/go-isatty + +require golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 diff --git a/vendor/github.com/mattn/go-isatty/go.sum b/vendor/github.com/mattn/go-isatty/go.sum new file mode 100644 index 000000000..426c8973c --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/github.com/mattn/go-isatty/isatty_android.go b/vendor/github.com/mattn/go-isatty/isatty_android.go new file mode 100644 index 000000000..d3567cb5b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_android.go @@ -0,0 +1,23 @@ +// +build android + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. It is always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 000000000..07e93039d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,24 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. It is always false in this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 000000000..ff714a376 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,15 @@ +// +build appengine js nacl + +package isatty + +// IsTerminal returns true if the file descriptor is a terminal; this is +// always false on js and appengine classic, which are sandboxed PaaS environments. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. It is always false in this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 000000000..bdd5c79a0 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,22 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. It is always false in this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_tcgets.go b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go new file mode 100644 index 000000000..453b025d0 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_tcgets.go @@ -0,0 +1,19 @@ +// +build linux aix +// +build !appengine +// +build !android + +package isatty + +import "golang.org/x/sys/unix" + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS) + return err == nil +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. It is always false in this environment.
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 000000000..af51cbcaa --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,94 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + fileNameInfo uintptr = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal returns true if the file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// isCygwinPipeName reports whether the pipe name is one used by a cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && token[0] != `\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// IsCygwinTerminal returns true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + return false + } + + // Cygwin/msys's pty is a pipe. + ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/mikefarah/yaml/v2/.travis.yml b/vendor/github.com/mikefarah/yaml/v2/.travis.yml new file mode 100644 index 000000000..9f556934d --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - 1.9 + - tip + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/github.com/mikefarah/yaml/v2/LICENSE b/vendor/github.com/mikefarah/yaml/v2/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/mikefarah/yaml/v2/LICENSE.libyaml b/vendor/github.com/mikefarah/yaml/v2/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mikefarah/yaml/v2/NOTICE b/vendor/github.com/mikefarah/yaml/v2/NOTICE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/mikefarah/yaml/v2/README.md b/vendor/github.com/mikefarah/yaml/v2/README.md new file mode 100644 index 000000000..b50c6e877 --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. 
+ +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/github.com/mikefarah/yaml/v2/apic.go b/vendor/github.com/mikefarah/yaml/v2/apic.go new file mode 100644 index 000000000..1f7e87e67 --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/apic.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/github.com/mikefarah/yaml/v2/decode.go b/vendor/github.com/mikefarah/yaml/v2/decode.go new file mode 100644 index 000000000..71c366554 --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/decode.go @@ -0,0 +1,776 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML parser") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML parser") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer.
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + // DefaultMapType type to unmarshal maps into, use MapSlice for ordered maps. + DefaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = DefaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: DefaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. 
+// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. + failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + good = d.unmarshal(n.alias, out) + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. 
+				text = []byte(n.value)
+			}
+			err := u.UnmarshalText(text)
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == yaml_BINARY_TAG {
+			out.SetString(resolved.(string))
+			return true
+		}
+		if resolved != nil {
+			out.SetString(n.value)
+			return true
+		}
+	case reflect.Interface:
+		if resolved == nil {
+			out.Set(reflect.Zero(out.Type()))
+		} else if tag == yaml_TIMESTAMP_TAG {
+			// It looks like a timestamp but for backward compatibility
+			// reasons we set it as a string, so that code that unmarshals
+			// timestamp-like values into interface{} will continue to
+			// see a string and not a time.Time.
+			// TODO(v3) Drop this.
+			out.Set(reflect.ValueOf(n.value))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				return true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					return true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			return true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			return true
+		case int64:
+			out.SetFloat(float64(resolved))
+			return true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			return true
+		case float64:
+			out.SetFloat(resolved)
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+ elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := 
getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + an, ok := d.doc.anchors[n.value] + if ok && an.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + an, ok := d.doc.anchors[ni.value] + if ok && an.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/github.com/mikefarah/yaml/v2/emitterc.go b/vendor/github.com/mikefarah/yaml/v2/emitterc.go new file mode 100644 index 000000000..a1c2cc526 --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. 
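+// The break style comes from emitter.line_break: a lone '\r', a lone '\n',
+// or the two-byte "\r\n" pair; any other value panics, as the emitter is
+// expected to have normalized the setting by the time this runs.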
+func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. 
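+//
+// The emitter is effectively a pushdown automaton: emitter.state names the
+// current state and emitter.states is the stack of states to return to when
+// a nested node ends. As a rough trace, emitting the single-pair block
+// mapping "a: b" steps through DOCUMENT-START, DOCUMENT-CONTENT,
+// BLOCK-MAPPING-FIRST-KEY, BLOCK-MAPPING-SIMPLE-VALUE, BLOCK-MAPPING-KEY,
+// and finally DOCUMENT-END.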
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. +func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. 
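+//
+// For a typical implicit first document nothing extra is written here; the
+// "---" marker only appears when the document is explicit (directives are
+// present, canonical mode is on, or it is not the first document in the
+// stream), with any %YAML/%TAG directives written ahead of it.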
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. 
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. +func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. 
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. 
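+//
+// The four flags record where the node sits (root, sequence item, mapping
+// key/value, candidate simple key); later style decisions such as
+// yaml_emitter_select_scalar_style read them back from the corresponding
+// emitter.*_context fields.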
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
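+// For example, emitting the flow value {} queues a MAPPING-START event
+// immediately followed by MAPPING-END, which is exactly the pattern this
+// check looks for.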
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
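+//
+// A tag that matched a %TAG directive during analysis is written as handle
+// plus suffix (for instance, a "tag:yaml.org,2002:" tag comes out in the
+// familiar "!!" shorthand), while a tag with no matching directive falls
+// back to the verbatim "!<...>" form.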
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. +func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. 
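+// Both anchors ("&name") and aliases ("*name") are validated here; the
+// alias flag only changes the wording of the error reported for an empty
+// or non-alphanumeric value.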
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } 
else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, 
rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if 
!yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/github.com/mikefarah/yaml/v2/encode.go b/vendor/github.com/mikefarah/yaml/v2/encode.go new file mode 100644 index 000000000..a14435e82 --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/encode.go @@ -0,0 +1,362 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/github.com/mikefarah/yaml/v2/go.mod b/vendor/github.com/mikefarah/yaml/v2/go.mod new file mode 100644 index 000000000..a0d5f48b3 --- /dev/null +++ 
b/vendor/github.com/mikefarah/yaml/v2/go.mod @@ -0,0 +1,5 @@ +module "github.com/mikefarah/yaml/v2" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/github.com/mikefarah/yaml/v2/parserc.go b/vendor/github.com/mikefarah/yaml/v2/parserc.go new file mode 100644 index 000000000..81d05dfe5 --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
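The grammar above is driven one token at a time by the dispatcher that follows; at the API level, it is why a single input stream may carry several documents. A minimal consumption sketch (illustrative, not part of the vendored source), assuming this fork keeps the Decoder type of upstream gopkg.in/yaml.v2:

package main

import (
	"fmt"
	"io"
	"strings"

	yaml "github.com/mikefarah/yaml/v2"
)

func main() {
	// One stream: an implicit first document, then an explicit second one.
	in := "a: 1\n---\nb: 2\n"
	dec := yaml.NewDecoder(strings.NewReader(in))
	for {
		var doc map[string]interface{}
		err := dec.Decode(&doc)
		if err == io.EOF {
			break // STREAM-END
		}
		if err != nil {
			panic(err)
		}
		fmt.Println(doc)
	}
}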
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
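The helper below is what the block and flow states fall back to whenever a key or value token is missing; the observable effect is that a bare `a:` decodes to a null value. A short sketch of that behavior (illustrative, assuming upstream-compatible Unmarshal):

package main

import (
	"fmt"

	yaml "github.com/mikefarah/yaml/v2"
)

func main() {
	var m map[string]interface{}
	// "a:" has no value token, so the parser emits an empty scalar,
	// which resolves to null.
	if err := yaml.Unmarshal([]byte("a:\nb: 1\n"), &m); err != nil {
		panic(err)
	}
	fmt.Printf("a=%#v b=%#v\n", m["a"], m["b"]) // a=<nil> b=1
}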
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/github.com/mikefarah/yaml/v2/readerc.go b/vendor/github.com/mikefarah/yaml/v2/readerc.go new file mode 100644 index 000000000..7c1f5fac3 --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
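The constants that follow are the byte order marks the reader sniffs before decoding anything else. A self-contained sketch of the same detection order (UTF-16 marks first, then UTF-8, with UTF-8 assumed when no BOM is present):

package main

import (
	"bytes"
	"fmt"
)

// detectEncoding mirrors yaml_parser_determine_encoding: UTF-16 marks are
// checked first, then UTF-8; with no BOM, UTF-8 is assumed.
func detectEncoding(b []byte) string {
	switch {
	case bytes.HasPrefix(b, []byte{0xFF, 0xFE}):
		return "utf-16le"
	case bytes.HasPrefix(b, []byte{0xFE, 0xFF}):
		return "utf-16be"
	case bytes.HasPrefix(b, []byte{0xEF, 0xBB, 0xBF}):
		return "utf-8 (BOM)"
	default:
		return "utf-8 (assumed)"
	}
}

func main() {
	fmt.Println(detectEncoding([]byte{0xEF, 0xBB, 0xBF, 'a'}))
	fmt.Println(detectEncoding([]byte("plain text")))
}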
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
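The steps below reuse a single backing array by copying the unread tail of a buffer to its front and re-cutting the slice. A standalone sketch of that compaction idiom (illustrative, not part of the vendored source):

package main

import "fmt"

// compact moves the unread tail of buf (everything from pos on) to the
// front and re-cuts the slice, reusing the backing array instead of
// allocating a new buffer.
func compact(buf []byte, pos int) []byte {
	if pos > 0 && pos < len(buf) {
		copy(buf, buf[pos:])
	}
	return buf[:len(buf)-pos]
}

func main() {
	buf := []byte("abcdef")
	buf = compact(buf, 4) // 4 bytes already consumed
	fmt.Printf("%q (capacity preserved: %d)\n", buf, cap(buf))
}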
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
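This width-versus-value check is what rejects overlong encodings such as 0xC0 0x80 for NUL. The standard library enforces the same rule, which a quick standalone check confirms:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// NUL encoded in two bytes (0xC0 0x80) is overlong and rejected.
	r, size := utf8.DecodeRune([]byte{0xC0, 0x80})
	fmt.Println(r == utf8.RuneError, size) // true 1

	// The minimal one-byte form is accepted.
	r, size = utf8.DecodeRune([]byte{0x00})
	fmt.Println(r, size) // 0 1
}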
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
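The surrogate-pair formulas quoted above are the same arithmetic that unicode/utf16 performs. A standalone sketch decoding U+1F600 both ways:

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// U+1F600 as a UTF-16 surrogate pair: high 0xD83D, low 0xDE00.
	fmt.Printf("%c\n", utf16.Decode([]uint16{0xD83D, 0xDE00})[0])

	// The same value via the formulas in the comment above.
	v := 0x10000 + ((rune(0xD83D)&0x3FF)<<10 | rune(0xDE00)&0x3FF)
	fmt.Printf("%U\n", v) // U+1F600
}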
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/github.com/mikefarah/yaml/v2/resolve.go b/vendor/github.com/mikefarah/yaml/v2/resolve.go new file mode 100644 index 000000000..6c151db6f --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
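This resolution branch is what gives unquoted scalars their concrete Go types when decoding into interface{}. A usage-level sketch (illustrative; exact result types may vary by fork version):

package main

import (
	"fmt"

	yaml "github.com/mikefarah/yaml/v2"
)

func main() {
	var v map[string]interface{}
	src := "a: yes\nb: 0x1F\nc: 1.5\nd: 2001-12-14\ne: '0x1F'\n"
	if err := yaml.Unmarshal([]byte(src), &v); err != nil {
		panic(err)
	}
	// Expected (YAML 1.1 resolution): yes -> bool, 0x1F -> int,
	// 1.5 -> float64, the date -> time.Time, quoted '0x1F' -> string.
	for _, k := range []string{"a", "b", "c", "d", "e"} {
		fmt.Printf("%s: %T\n", k, v[k])
	}
}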
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
diff --git a/vendor/github.com/mikefarah/yaml/v2/scannerc.go b/vendor/github.com/mikefarah/yaml/v2/scannerc.go
new file mode 100644
index 000000000..077fd1dd2
--- /dev/null
+++ b/vendor/github.com/mikefarah/yaml/v2/scannerc.go
@@ -0,0 +1,2696 @@
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. These are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...'
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + if parser.tokens_head == len(parser.tokens) { + // Queue is empty. + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. 
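+ // (Each later call dispatches on the current character: e.g. for the
+ // input "- a", the scanner rolls the indentation, emits BLOCK-ENTRY for
+ // the '-', and then fetches the plain scalar "a".)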
+ if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? 
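+ // ('>' folds line breaks into spaces, while '|' above preserves them
+ // literally.)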
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. + if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { + + // Check if the potential simple key to be removed is required. + if simple_key.required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + } + } + return true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
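+// A simple key is a mapping key that is not introduced by the '?'
+// indicator; its KEY token must be inserted retroactively once the
+// following ':' is found, so candidate positions have to be remembered.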
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ // Increase the flow level.
+ parser.flow_level++
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+ return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
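+ // (One BLOCK-END token has already been queued for the level being
+ // popped here.)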
+ parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. 
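+ // (The typ argument distinguishes '[' from '{'; both indicators share
+ // this routine.)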
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it because the Parser
+ // is able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
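+ // (yaml_parser_roll_indent only inserts the token when this key is
+ // indented further than the enclosing block.)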
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if simple_key.possible { + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. 
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A flow scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! 
tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. 
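+// That is, the major and minor version numbers separated by '.', as in
+// the "1.1" below.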
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
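+ // (A tag must be followed by a blank or a line break, as in "!!str foo".)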
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
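+ // (A '%xx' octet is decoded below; for example, "%21" yields '!'.)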
+        if parser.buffer[parser.buffer_pos] == '%' {
+            if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+                return false
+            }
+        } else {
+            s = read(parser, s)
+        }
+        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+            return false
+        }
+        hasTag = true
+    }
+
+    if !hasTag {
+        yaml_parser_set_scanner_tag_error(parser, directive,
+            start_mark, "did not find expected tag URI")
+        return false
+    }
+    *uri = s
+    return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+    // Decode the required number of characters.
+    w := 1024
+    for w > 0 {
+        // Check for a URI-escaped octet.
+        if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+            return false
+        }
+
+        if !(parser.buffer[parser.buffer_pos] == '%' &&
+            is_hex(parser.buffer, parser.buffer_pos+1) &&
+            is_hex(parser.buffer, parser.buffer_pos+2)) {
+            return yaml_parser_set_scanner_tag_error(parser, directive,
+                start_mark, "did not find URI escaped octet")
+        }
+
+        // Get the octet.
+        octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+        // If it is the leading octet, determine the length of the UTF-8 sequence.
+        if w == 1024 {
+            w = width(octet)
+            if w == 0 {
+                return yaml_parser_set_scanner_tag_error(parser, directive,
+                    start_mark, "found an incorrect leading UTF-8 octet")
+            }
+        } else {
+            // Check if the trailing octet is correct.
+            if octet&0xC0 != 0x80 {
+                return yaml_parser_set_scanner_tag_error(parser, directive,
+                    start_mark, "found an incorrect trailing UTF-8 octet")
+            }
+        }
+
+        // Copy the octet and move the pointers.
+        *s = append(*s, octet)
+        skip(parser)
+        skip(parser)
+        skip(parser)
+        w--
+    }
+    return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+    // Eat the indicator '|' or '>'.
+    start_mark := parser.mark
+    skip(parser)
+
+    // Scan the additional block scalar indicators.
+    if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+        return false
+    }
+
+    // Check for a chomping indicator.
+    var chomping, increment int
+    if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+        // Set the chomping method and eat the indicator.
+        if parser.buffer[parser.buffer_pos] == '+' {
+            chomping = +1
+        } else {
+            chomping = -1
+        }
+        skip(parser)
+
+        // Check for an indentation indicator.
+        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+            return false
+        }
+        if is_digit(parser.buffer, parser.buffer_pos) {
+            // Check that the indentation is greater than 0.
+            if parser.buffer[parser.buffer_pos] == '0' {
+                yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+                    start_mark, "found an indentation indicator equal to 0")
+                return false
+            }
+
+            // Get the indentation level and eat the indicator.
+            increment = as_digit(parser.buffer, parser.buffer_pos)
+            skip(parser)
+        }
+
+    } else if is_digit(parser.buffer, parser.buffer_pos) {
+        // Do the same as above, but in the opposite order.
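+        // A block scalar header may give the two indicators in either order:
+        // "|2-" (indentation first) and "|-2" (chomping first) are equivalent.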
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. 
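+    // Chomping: "-" (-1) strips the final line break and any trailing empty
+    // lines; "+" (+1) keeps them all; the default (0, clip) keeps the final
+    // break but drops trailing empty lines.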
+    if chomping != -1 {
+        s = append(s, leading_break...)
+    }
+    if chomping == 1 {
+        s = append(s, trailing_breaks...)
+    }
+
+    // Create a token.
+    *token = yaml_token_t{
+        typ:        yaml_SCALAR_TOKEN,
+        start_mark: start_mark,
+        end_mark:   end_mark,
+        value:      s,
+        style:      yaml_LITERAL_SCALAR_STYLE,
+    }
+    if !literal {
+        token.style = yaml_FOLDED_SCALAR_STYLE
+    }
+    return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+    *end_mark = parser.mark
+
+    // Eat the indentation spaces and line breaks.
+    max_indent := 0
+    for {
+        // Eat the indentation spaces.
+        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+            return false
+        }
+        for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+            skip(parser)
+            if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+                return false
+            }
+        }
+        if parser.mark.column > max_indent {
+            max_indent = parser.mark.column
+        }
+
+        // Check for a tab character messing up the indentation.
+        if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+            return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+                start_mark, "found a tab character where an indentation space is expected")
+        }
+
+        // Have we found a non-empty line?
+        if !is_break(parser.buffer, parser.buffer_pos) {
+            break
+        }
+
+        // Consume the line break.
+        if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+            return false
+        }
+        // [Go] Should really be returning breaks instead.
+        *breaks = read_line(parser, *breaks)
+        *end_mark = parser.mark
+    }
+
+    // Determine the indentation level if needed.
+    if *indent == 0 {
+        *indent = max_indent
+        if *indent < parser.indent+1 {
+            *indent = parser.indent + 1
+        }
+        if *indent < 1 {
+            *indent = 1
+        }
+    }
+    return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+    // Eat the left quote.
+    start_mark := parser.mark
+    skip(parser)
+
+    // Consume the content of the quoted scalar.
+    var s, leading_break, trailing_breaks, whitespaces []byte
+    for {
+        // Check that there are no document indicators at the beginning of the line.
+        if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+            return false
+        }
+
+        if parser.mark.column == 0 &&
+            ((parser.buffer[parser.buffer_pos+0] == '-' &&
+                parser.buffer[parser.buffer_pos+1] == '-' &&
+                parser.buffer[parser.buffer_pos+2] == '-') ||
+                (parser.buffer[parser.buffer_pos+0] == '.' &&
+                    parser.buffer[parser.buffer_pos+1] == '.' &&
+                    parser.buffer[parser.buffer_pos+2] == '.')) &&
+            is_blankz(parser.buffer, parser.buffer_pos+3) {
+            yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+                start_mark, "found unexpected document indicator")
+            return false
+        }
+
+        // Check for EOF.
+        if is_z(parser.buffer, parser.buffer_pos) {
+            yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+                start_mark, "found unexpected end of stream")
+            return false
+        }
+
+        // Consume non-blank characters.
+        leading_blanks := false
+        for !is_blankz(parser.buffer, parser.buffer_pos) {
+            if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+                // It is an escaped single quote.
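+                // e.g. the single-quoted scalar 'it''s' scans to the value "it's".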
+                s = append(s, '\'')
+                skip(parser)
+                skip(parser)
+
+            } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+                // It is a right single quote.
+                break
+            } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+                // It is a right double quote.
+                break
+
+            } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+                // It is an escaped line break.
+                if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+                    return false
+                }
+                skip(parser)
+                skip_line(parser)
+                leading_blanks = true
+                break
+
+            } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+                // It is an escape sequence.
+                code_length := 0
+
+                // Check the escape character.
+                switch parser.buffer[parser.buffer_pos+1] {
+                case '0':
+                    s = append(s, 0)
+                case 'a':
+                    s = append(s, '\x07')
+                case 'b':
+                    s = append(s, '\x08')
+                case 't', '\t':
+                    s = append(s, '\x09')
+                case 'n':
+                    s = append(s, '\x0A')
+                case 'v':
+                    s = append(s, '\x0B')
+                case 'f':
+                    s = append(s, '\x0C')
+                case 'r':
+                    s = append(s, '\x0D')
+                case 'e':
+                    s = append(s, '\x1B')
+                case ' ':
+                    s = append(s, '\x20')
+                case '"':
+                    s = append(s, '"')
+                case '\'':
+                    s = append(s, '\'')
+                case '\\':
+                    s = append(s, '\\')
+                case 'N': // NEL (#x85)
+                    s = append(s, '\xC2')
+                    s = append(s, '\x85')
+                case '_': // #xA0
+                    s = append(s, '\xC2')
+                    s = append(s, '\xA0')
+                case 'L': // LS (#x2028)
+                    s = append(s, '\xE2')
+                    s = append(s, '\x80')
+                    s = append(s, '\xA8')
+                case 'P': // PS (#x2029)
+                    s = append(s, '\xE2')
+                    s = append(s, '\x80')
+                    s = append(s, '\xA9')
+                case 'x':
+                    code_length = 2
+                case 'u':
+                    code_length = 4
+                case 'U':
+                    code_length = 8
+                default:
+                    yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+                        start_mark, "found unknown escape character")
+                    return false
+                }
+
+                skip(parser)
+                skip(parser)
+
+                // Consume an arbitrary escape code.
+                if code_length > 0 {
+                    var value int
+
+                    // Scan the character value.
+                    if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+                        return false
+                    }
+                    for k := 0; k < code_length; k++ {
+                        if !is_hex(parser.buffer, parser.buffer_pos+k) {
+                            yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+                                start_mark, "did not find expected hexadecimal number")
+                            return false
+                        }
+                        value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+                    }
+
+                    // Check the value and write the character.
+                    if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+                        yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+                            start_mark, "found invalid Unicode character escape code")
+                        return false
+                    }
+                    if value <= 0x7F {
+                        s = append(s, byte(value))
+                    } else if value <= 0x7FF {
+                        s = append(s, byte(0xC0+(value>>6)))
+                        s = append(s, byte(0x80+(value&0x3F)))
+                    } else if value <= 0xFFFF {
+                        s = append(s, byte(0xE0+(value>>12)))
+                        s = append(s, byte(0x80+((value>>6)&0x3F)))
+                        s = append(s, byte(0x80+(value&0x3F)))
+                    } else {
+                        s = append(s, byte(0xF0+(value>>18)))
+                        s = append(s, byte(0x80+((value>>12)&0x3F)))
+                        s = append(s, byte(0x80+((value>>6)&0x3F)))
+                        s = append(s, byte(0x80+(value&0x3F)))
+                    }
+
+                    // Advance the pointer.
+                    for k := 0; k < code_length; k++ {
+                        skip(parser)
+                    }
+                }
+            } else {
+                // It is a non-escaped non-blank character.
+                s = read(parser, s)
+            }
+            if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+                return false
+            }
+        }
+
+        if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+            return false
+        }
+
+        // Check if we are at the end of the scalar.
+ if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) 
+ } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
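+    // If the plain scalar ended right after consuming a line break, a simple
+    // key may start at the beginning of the next line.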
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/github.com/mikefarah/yaml/v2/sorter.go b/vendor/github.com/mikefarah/yaml/v2/sorter.go new file mode 100644 index 000000000..4c45e660a --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/github.com/mikefarah/yaml/v2/writerc.go b/vendor/github.com/mikefarah/yaml/v2/writerc.go new file mode 100644 index 000000000..a2dde608c --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+    if emitter.write_handler == nil {
+        panic("write handler not set")
+    }
+
+    // Check if the buffer is empty.
+    if emitter.buffer_pos == 0 {
+        return true
+    }
+
+    if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+        return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+    }
+    emitter.buffer_pos = 0
+    return true
+}
diff --git a/vendor/github.com/mikefarah/yaml/v2/yaml.go b/vendor/github.com/mikefarah/yaml/v2/yaml.go
new file mode 100644
index 000000000..de85aa4cd
--- /dev/null
+++ b/vendor/github.com/mikefarah/yaml/v2/yaml.go
@@ -0,0 +1,466 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+    "errors"
+    "fmt"
+    "io"
+    "reflect"
+    "strings"
+    "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+    Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+    UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+    MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+    return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+    return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+    strict bool
+    parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+    return &Decoder{
+        parser: newParserFromReader(r),
+    }
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+    dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+    d := newDecoder(dec.strict)
+    defer handleErr(&err)
+    node := dec.parser.parse()
+    if node == nil {
+        return io.EOF
+    }
+    out := reflect.ValueOf(v)
+    if out.Kind() == reflect.Ptr && !out.IsNil() {
+        out = out.Elem()
+    }
+    d.unmarshal(node, out)
+    if len(d.terrors) > 0 {
+        return &TypeError{d.terrors}
+    }
+    return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+    defer handleErr(&err)
+    d := newDecoder(strict)
+    p := newParser(in)
+    defer p.destroy()
+    node := p.parse()
+    if node != nil {
+        v := reflect.ValueOf(out)
+        if v.Kind() == reflect.Ptr && !v.IsNil() {
+            v = v.Elem()
+        }
+        d.unmarshal(node, v)
+    }
+    if len(d.terrors) > 0 {
+        return &TypeError{d.terrors}
+    }
+    return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be included if that method returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+    defer handleErr(&err)
+    e := newEncoder()
+    defer e.destroy()
+    e.marshalDoc("", reflect.ValueOf(in))
+    e.finish()
+    out = e.out
+    return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+    encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+    return &Encoder{
+        encoder: newEncoderWithWriter(w),
+    }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent document will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+    defer handleErr(&err)
+    e.encoder.marshalDoc("", reflect.ValueOf(v))
+    return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+    defer handleErr(&err)
+    e.encoder.finish()
+    return nil
+}
+
+func handleErr(err *error) {
+    if v := recover(); v != nil {
+        if e, ok := v.(yamlError); ok {
+            *err = e.err
+        } else {
+            panic(v)
+        }
+    }
+}
+
+type yamlError struct {
+    err error
+}
+
+func fail(err error) {
+    panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+    panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+    Errors []string
+}
+
+func (e *TypeError) Error() string {
+    return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+    FieldsMap  map[string]fieldInfo
+    FieldsList []fieldInfo
+
+    // InlineMap is the number of the field in the struct that
+    // contains an ,inline map, or -1 if there's none.
+    InlineMap int
+}
+
+type fieldInfo struct {
+    Key       string
+    Num       int
+    OmitEmpty bool
+    Flow      bool
+    // Id holds the unique field identifier, so we can cheaply
+    // check for field duplicates without maintaining an extra map.
+    Id int
+
+    // Inline holds the field index if the field is part of an inlined struct.
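+    // For a field reached through nested ,inline structs, it holds the full
+    // index path from the outer struct down to the field.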
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
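+// A value implementing IsZeroer is omitted (when omitempty is set) if IsZero
+// returns true, even when some of its exported fields are non-zero.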
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/github.com/mikefarah/yaml/v2/yamlh.go b/vendor/github.com/mikefarah/yaml/v2/yamlh.go new file mode 100644 index 000000000..e25cee563 --- /dev/null +++ b/vendor/github.com/mikefarah/yaml/v2/yamlh.go @@ -0,0 +1,738 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. 
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+    // Let the emitter choose the style.
+    yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+    yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+    yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+    // Let the emitter choose the style.
+    yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+    yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+    yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+    // An empty token.
+    yaml_NO_TOKEN yaml_token_type_t = iota
+
+    yaml_STREAM_START_TOKEN // A STREAM-START token.
+    yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+    yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+    yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+    yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+    yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+    yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+    yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+    yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+    yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+    yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+    yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+    yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+    yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+    yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+    yaml_KEY_TOKEN         // A KEY token.
+    yaml_VALUE_TOKEN       // A VALUE token.
+
+    yaml_ALIAS_TOKEN  // An ALIAS token.
+    yaml_ANCHOR_TOKEN // An ANCHOR token.
+    yaml_TAG_TOKEN    // A TAG token.
+    yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+    switch tt {
+    case yaml_NO_TOKEN:
+        return "yaml_NO_TOKEN"
+    case yaml_STREAM_START_TOKEN:
+        return "yaml_STREAM_START_TOKEN"
+    case yaml_STREAM_END_TOKEN:
+        return "yaml_STREAM_END_TOKEN"
+    case yaml_VERSION_DIRECTIVE_TOKEN:
+        return "yaml_VERSION_DIRECTIVE_TOKEN"
+    case yaml_TAG_DIRECTIVE_TOKEN:
+        return "yaml_TAG_DIRECTIVE_TOKEN"
+    case yaml_DOCUMENT_START_TOKEN:
+        return "yaml_DOCUMENT_START_TOKEN"
+    case yaml_DOCUMENT_END_TOKEN:
+        return "yaml_DOCUMENT_END_TOKEN"
+    case yaml_BLOCK_SEQUENCE_START_TOKEN:
+        return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+    case yaml_BLOCK_MAPPING_START_TOKEN:
+        return "yaml_BLOCK_MAPPING_START_TOKEN"
+    case yaml_BLOCK_END_TOKEN:
+        return "yaml_BLOCK_END_TOKEN"
+    case yaml_FLOW_SEQUENCE_START_TOKEN:
+        return "yaml_FLOW_SEQUENCE_START_TOKEN"
+    case yaml_FLOW_SEQUENCE_END_TOKEN:
+        return "yaml_FLOW_SEQUENCE_END_TOKEN"
+    case yaml_FLOW_MAPPING_START_TOKEN:
+        return "yaml_FLOW_MAPPING_START_TOKEN"
+    case yaml_FLOW_MAPPING_END_TOKEN:
+        return "yaml_FLOW_MAPPING_END_TOKEN"
+    case yaml_BLOCK_ENTRY_TOKEN:
+        return "yaml_BLOCK_ENTRY_TOKEN"
+    case yaml_FLOW_ENTRY_TOKEN:
+        return "yaml_FLOW_ENTRY_TOKEN"
+    case yaml_KEY_TOKEN:
+        return "yaml_KEY_TOKEN"
+    case yaml_VALUE_TOKEN:
+        return "yaml_VALUE_TOKEN"
+    case yaml_ALIAS_TOKEN:
+        return "yaml_ALIAS_TOKEN"
+    case yaml_ANCHOR_TOKEN:
+        return "yaml_ANCHOR_TOKEN"
+    case yaml_TAG_TOKEN:
+        return "yaml_TAG_TOKEN"
+    case yaml_SCALAR_TOKEN:
+        return "yaml_SCALAR_TOKEN"
+    }
+    return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+    // The token type.
+    typ yaml_token_type_t
+
+    // The start/end of the token.
+    start_mark, end_mark yaml_mark_t
+
+    // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. 
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. 
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+    possible     bool        // Is a simple key possible?
+    required     bool        // Is a simple key required?
+    token_number int         // The number of the token.
+    mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+    yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+    yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+    yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+    yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+    yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+    yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+    yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+    yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+    yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+    yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+    yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+    yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+    yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+    yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+    yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+    yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+    yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+    yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+    yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+    yaml_PARSE_END_STATE                               // Expect nothing.
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. 
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. 
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+	problem string // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer []byte // The working buffer.
+	buffer_pos int // The current position of the buffer.
+
+	raw_buffer []byte // The raw buffer.
+	raw_buffer_pos int // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical bool // If the output is in the canonical style?
+	best_indent int // The number of indentation spaces.
+	best_width int // The preferred width of the output lines.
+	unicode bool // Allow unescaped non-ASCII characters?
+	line_break yaml_break_t // The preferred line break.
+
+	state yaml_emitter_state_t // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events []yaml_event_t // The event queue.
+	events_head int // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context bool // Is it the document root context?
+	sequence_context bool // Is it a sequence context?
+	mapping_context bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line int // The current line.
+	column int // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias bool // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value []byte // The scalar value.
+		multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+		block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+		style yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int // The number of references.
+		anchor int // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/github.com/mikefarah/yaml/v2/yamlprivateh.go b/vendor/github.com/mikefarah/yaml/v2/yamlprivateh.go
new file mode 100644
index 000000000..8110ce3c3
--- /dev/null
+++ b/vendor/github.com/mikefarah/yaml/v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
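+// This open-codes is_break(b, i) || is_z(b, i); like the other checks below,
+// it is likely written this way so the compiler can inline it (see the note
+// on width at the end of this file).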
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/github.com/mikefarah/yq/v2/.dockerignore b/vendor/github.com/mikefarah/yq/v2/.dockerignore new file mode 100644 index 000000000..ba077a403 --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/.dockerignore @@ -0,0 +1 @@ +bin diff --git a/vendor/github.com/mikefarah/yq/v2/.gitignore b/vendor/github.com/mikefarah/yq/v2/.gitignore new file mode 100644 index 000000000..691180140 --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/.gitignore @@ -0,0 +1,39 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +bin +build +build-done +.DS_Store + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go +coverage.out +*.exe +*.test +*.prof +yaml +vendor/ +tmp/ +cover/ +yq + +# snapcraft +parts/ +prime/ +.snapcraft/ +yq*.snap diff --git a/vendor/github.com/mikefarah/yq/v2/.travis.yml b/vendor/github.com/mikefarah/yq/v2/.travis.yml new file mode 100644 index 000000000..6f4d9cfde --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/.travis.yml @@ -0,0 +1,6 @@ +language: go +go: + - 1.13.x +script: + - scripts/devtools.sh + - make local build diff --git a/vendor/github.com/mikefarah/yq/v2/Dockerfile b/vendor/github.com/mikefarah/yq/v2/Dockerfile new file mode 100644 index 000000000..32060934e --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/Dockerfile @@ -0,0 +1,23 @@ +FROM golang:1.13 as builder + +WORKDIR /go/src/mikefarah/yq + +# cache devtools +COPY ./scripts/devtools.sh /go/src/mikefarah/yq/scripts/devtools.sh +RUN ./scripts/devtools.sh + +COPY . 
/go/src/mikefarah/yq
+
+RUN CGO_ENABLED=0 make local build
+
+# Choose alpine as a base image to make this useful for CI, as many
+# CI tools expect an interactive shell inside the container
+FROM alpine:3.8 as production
+
+COPY --from=builder /go/src/mikefarah/yq/yq /usr/bin/yq
+RUN chmod +x /usr/bin/yq
+
+ARG VERSION=none
+LABEL version=${VERSION}
+
+WORKDIR /workdir
diff --git a/vendor/github.com/mikefarah/yq/v2/Dockerfile.dev b/vendor/github.com/mikefarah/yq/v2/Dockerfile.dev
new file mode 100644
index 000000000..fb7ddb68e
--- /dev/null
+++ b/vendor/github.com/mikefarah/yq/v2/Dockerfile.dev
@@ -0,0 +1,30 @@
+FROM golang:1.13
+
+COPY scripts/devtools.sh /opt/devtools.sh
+
+RUN set -e -x \
+    && /opt/devtools.sh
+
+# install mkdocs
+RUN set -ex \
+    && buildDeps=' \
+        build-essential \
+        python-dev \
+    ' \
+    && apt-get update && apt-get install -y --no-install-recommends \
+        $buildDeps \
+        python2.7 \
+        python-setuptools \
+        python-wheel \
+        python-pip \
+    && pip install --upgrade \
+        pip \
+        'Markdown>=2.6.9' \
+        'mkdocs>=0.16.3' \
+        'mkdocs-material>=1.10.1' \
+        'markdown-include>=0.5.1' \
+    && apt-get purge -y --auto-remove $buildDeps \
+    && rm -rf /var/lib/apt/lists/*
+
+ENV CGO_ENABLED 0
+ENV GOPATH /go:/yq
diff --git a/vendor/github.com/mikefarah/yq/v2/LICENSE b/vendor/github.com/mikefarah/yq/v2/LICENSE
new file mode 100644
index 000000000..e2f1fc03d
--- /dev/null
+++ b/vendor/github.com/mikefarah/yq/v2/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2017 Mike Farah
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mikefarah/yq/v2/Makefile b/vendor/github.com/mikefarah/yq/v2/Makefile
new file mode 100644
index 000000000..22b72069a
--- /dev/null
+++ b/vendor/github.com/mikefarah/yq/v2/Makefile
@@ -0,0 +1,114 @@
+MAKEFLAGS += --warn-undefined-variables
+SHELL := /bin/bash
+.SHELLFLAGS := -o pipefail -euc
+.DEFAULT_GOAL := install
+
+include Makefile.variables
+
+.PHONY: help
+help:
+	@echo 'Management commands for yq:'
+	@echo
+	@echo 'Usage:'
+	@echo '    ## Develop / Test Commands'
+	@echo '    make build           Build yq binary.'
+	@echo '    make install         Install yq.'
+	@echo '    make xcompile        Build cross-compiled binaries of yq.'
+	@echo '    make vendor          Install dependencies to vendor directory.'
+	@echo '    make format          Run code formatter.'
+	@echo '    make check           Run static code analysis (lint).'
+	@echo '    make test            Run tests on project.'
+	@echo '    make cover           Run tests and capture code coverage metrics on project.'
+	@echo '    make clean           Clean the directory tree of produced artifacts.'
+	@echo
+	@echo '    ## Utility Commands'
+	@echo '    make setup           Configures Minishift/Docker directory mounts.'
+	@echo
+
+
+.PHONY: clean
+clean:
+	@rm -rf bin build cover *.out
+
+## prefix before other make targets to run in your local dev environment
+local: | quiet
+	@$(eval DOCKRUN= )
+	@mkdir -p tmp
+	@touch tmp/dev_image_id
+quiet: # this is silly but shuts up 'Nothing to be done for `local`'
+	@:
+
+prepare: tmp/dev_image_id
+tmp/dev_image_id: Dockerfile.dev scripts/devtools.sh
+	@mkdir -p tmp
+	@docker rmi -f ${DEV_IMAGE} > /dev/null 2>&1 || true
+	@docker build -t ${DEV_IMAGE} -f Dockerfile.dev .
+	@docker inspect -f "{{ .ID }}" ${DEV_IMAGE} > tmp/dev_image_id
+
+# ----------------------------------------------
+# build
+.PHONY: build
+build: build/dev
+
+.PHONY: build/dev
+build/dev: test *.go
+	@mkdir -p bin/
+	${DOCKRUN} go build --ldflags "$(LDFLAGS)"
+	${DOCKRUN} bash ./scripts/acceptance.sh
+
+## Compile the project for multiple OS and Architectures.
+xcompile: check
+	@rm -rf build/
+	@mkdir -p build
+	${DOCKRUN} bash ./scripts/xcompile.sh
+	@find build -type d -exec chmod 755 {} \; || :
+	@find build -type f -exec chmod 755 {} \; || :
+
+.PHONY: install
+install: build
+	${DOCKRUN} go install
+
+# Each fetched dependency should be an entry within vendor.json; not currently included within the project
+.PHONY: vendor
+vendor: tmp/dev_image_id
+	${DOCKRUN} go mod vendor
+
+# ----------------------------------------------
+# develop and test
+
+.PHONY: format
+format: vendor
+	${DOCKRUN} bash ./scripts/format.sh
+
+.PHONY: check
+check: format
+	${DOCKRUN} bash ./scripts/check.sh
+
+.PHONY: test
+test: check
+	${DOCKRUN} bash ./scripts/test.sh
+
+.PHONY: cover
+cover: check
+	@rm -rf cover/
+	@mkdir -p cover
+	${DOCKRUN} bash ./scripts/coverage.sh
+	@find cover -type d -exec chmod 755 {} \; || :
+	@find cover -type f -exec chmod 644 {} \; || :
+
+.PHONY: build-docs
+build-docs: prepare mkdocs.yml mkdocs/*
+	${DOCKRUN} mkdocs build
+	@find docs -type d -exec chmod 755 {} \; || :
+	@find docs -type f -exec chmod 644 {} \; || :
+
+.PHONY: release
+release: xcompile
+	${DOCKRUN} bash ./scripts/publish.sh
+
+# ----------------------------------------------
+# utilities
+
+.PHONY: setup
+setup:
+	@bash ./scripts/setup.sh
diff --git a/vendor/github.com/mikefarah/yq/v2/Makefile.variables b/vendor/github.com/mikefarah/yq/v2/Makefile.variables
new file mode 100644
index 000000000..406bac34f
--- /dev/null
+++ b/vendor/github.com/mikefarah/yq/v2/Makefile.variables
@@ -0,0 +1,38 @@
+export PROJECT = yq
+IMPORT_PATH := github.com/mikefarah/${PROJECT}
+
+export GIT_COMMIT = $(shell git rev-parse --short HEAD)
+export GIT_DIRTY = $(shell test -n "$$(git status --porcelain)" && echo "+CHANGES" || true)
+export GIT_DESCRIBE = $(shell git describe --tags --always)
+LDFLAGS :=
+LDFLAGS += -X main.GitCommit=${GIT_COMMIT}${GIT_DIRTY}
+LDFLAGS += -X main.GitDescribe=${GIT_DESCRIBE}
+
+GITHUB_TOKEN ?=
+
+# Windows environment?
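+# (cygpath is only available under Cygwin/MSYS, so finding it signals Windows;
+# VBoxManage then distinguishes Docker Toolbox, which needs a VirtualBox-style
+# mount root, from native Docker for Windows.)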
+CYG_CHECK := $(shell hash cygpath 2>/dev/null && echo 1)
+ifeq ($(CYG_CHECK),1)
+	VBOX_CHECK := $(shell hash VBoxManage 2>/dev/null && echo 1)
+
+	# Docker Toolbox (pre-Windows 10)
+	ifeq ($(VBOX_CHECK),1)
+		ROOT := /${PROJECT}
+	else
+		# Docker Windows
+		ROOT := $(shell cygpath -m -a "$(shell pwd)")
+	endif
+else
+	# all non-windows environments
+	ROOT := $(shell pwd)
+endif
+
+DEV_IMAGE := ${PROJECT}_dev
+
+DOCKRUN := docker run --rm \
+	-e LDFLAGS="${LDFLAGS}" \
+	-e GITHUB_TOKEN="${GITHUB_TOKEN}" \
+	-v ${ROOT}/vendor:/go/src \
+	-v ${ROOT}:/${PROJECT}/src/${IMPORT_PATH} \
+	-w /${PROJECT}/src/${IMPORT_PATH} \
+	${DEV_IMAGE}
diff --git a/vendor/github.com/mikefarah/yq/v2/README.md b/vendor/github.com/mikefarah/yq/v2/README.md
new file mode 100644
index 000000000..f11624a9d
--- /dev/null
+++ b/vendor/github.com/mikefarah/yq/v2/README.md
@@ -0,0 +1,127 @@
+# yq
+
+[![Build Status](https://travis-ci.org/mikefarah/yq.svg?branch=master)](https://travis-ci.org/mikefarah/yq) ![Docker Pulls](https://img.shields.io/docker/pulls/mikefarah/yq.svg) ![Github Releases (by Release)](https://img.shields.io/github/downloads/mikefarah/yq/total.svg) ![Go Report](https://goreportcard.com/badge/github.com/mikefarah/yq)
+
+
+a lightweight and portable command-line YAML processor
+
+The aim of the project is to be the [jq](https://github.com/stedolan/jq) or sed of yaml files.
+
+## Install
+### On MacOS:
+```
+brew install yq
+```
+### On Ubuntu and other Linux distros supporting `snap` packages:
+```
+snap install yq
+```
+
+#### Snap notes
+`yq` installs with [_strict confinement_](https://docs.snapcraft.io/snap-confinement/6233) in snap, this means it doesn't have direct access to root files. To read root files you can:
+
+```
+sudo cat /etc/myfile | yq -r - somecommand
+```
+
+And to write to a root file you can either use [sponge](https://linux.die.net/man/1/sponge):
+```
+sudo cat /etc/myfile | yq -r - somecommand | sudo sponge /etc/myfile
+```
+or write to a temporary file:
+```
+sudo cat /etc/myfile | yq -r - somecommand | sudo tee /etc/myfile.tmp
+sudo mv /etc/myfile.tmp /etc/myfile
+rm /etc/myfile.tmp
+```
+
+### On Ubuntu 16.04 or higher from Debian package:
+```
+sudo add-apt-repository ppa:rmescandon/yq
+sudo apt update
+sudo apt install yq -y
+```
+### or, [Download latest binary](https://github.com/mikefarah/yq/releases/latest) or alternatively:
+```
+GO111MODULE=on go get github.com/mikefarah/yq@2.4.1
+```
+
+## Run with Docker
+
+Oneshot use:
+
+```bash
+docker run --rm -v ${PWD}:/workdir mikefarah/yq yq [flags] FILE...
+```
+
+Run commands interactively:
+
+```bash
+docker run --rm -it -v ${PWD}:/workdir mikefarah/yq sh
+```
+
+It can be useful to have a bash function to avoid typing the whole docker command:
+
+```bash
+yq() {
+  docker run --rm -i -v ${PWD}:/workdir mikefarah/yq yq $@
+}
+```
+
+## Features
+- Written in portable go, so you can download a lovely dependency free binary
+- Deep read a yaml file with a given path
+- Update a yaml file given a path
+- Update a yaml file given a script file
+- Update creates any missing entries in the path on the fly
+- Create a yaml file given a deep path and value
+- Create a yaml file given a script file
+- Prefix a path to a yaml file
+- Convert from json to yaml
+- Convert from yaml to json
+- Pipe data in by using '-'
+- Merge multiple yaml files where each additional file sets values for missing or null value keys.
+- Merge multiple yaml files and override previous values.
+- Merge multiple yaml files and append array values.
+- Supports multiple documents in a single yaml file + +## [Usage](http://mikefarah.github.io/yq/) + +Check out the [documentation](http://mikefarah.github.io/yq/) for more detailed and advanced usage. + +``` +yq is a lightweight and portable command-line YAML processor. It aims to be the jq or sed of yaml files. + +Usage: + yq [flags] + yq [command] + +Available Commands: + delete yq d [--inplace/-i] [--doc/-d index] sample.yaml a.b.c + help Help about any command + merge yq m [--inplace/-i] [--doc/-d index] [--overwrite/-x] [--append/-a] sample.yaml sample2.yaml + new yq n [--script/-s script_file] a.b.c newValue + prefix yq p [--inplace/-i] [--doc/-d index] sample.yaml a.b.c + read yq r [--doc/-d index] sample.yaml a.b.c + write yq w [--inplace/-i] [--script/-s script_file] [--doc/-d index] sample.yaml a.b.c newValue + +Flags: + -h, --help help for yq + -t, --trim trim yaml output (default true) + -v, --verbose verbose mode + -V, --version Print version information and quit + +Use "yq [command] --help" for more information about a command. +``` + +## Contribute +1. `scripts/devtools.sh` +2. `make [local] vendor` +3. add unit tests +4. apply changes to go.mod +5. `make [local] build` +6. If required, update the user documentation + - Update README.md and/or documentation under the mkdocs folder + - `make [local] build-docs` + - browse to docs/index.html and check your changes +7. profit diff --git a/vendor/github.com/mikefarah/yq/v2/data_navigator.go b/vendor/github.com/mikefarah/yq/v2/data_navigator.go new file mode 100644 index 000000000..5746db709 --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/data_navigator.go @@ -0,0 +1,349 @@ +package main + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + yaml "github.com/mikefarah/yaml/v2" +) + +func matchesKey(key string, actual interface{}) bool { + var actualString = fmt.Sprintf("%v", actual) + var prefixMatch = strings.TrimSuffix(key, "*") + if prefixMatch != key { + return strings.HasPrefix(actualString, prefixMatch) + } + return actualString == key +} + +func entriesInSlice(context yaml.MapSlice, key string) []*yaml.MapItem { + var matches = make([]*yaml.MapItem, 0) + for idx := range context { + var entry = &context[idx] + if matchesKey(key, entry.Key) { + matches = append(matches, entry) + } + } + return matches +} + +func getMapSlice(context interface{}) yaml.MapSlice { + var mapSlice yaml.MapSlice + switch context := context.(type) { + case yaml.MapSlice: + mapSlice = context + default: + mapSlice = make(yaml.MapSlice, 0) + } + return mapSlice +} + +func getArray(context interface{}) (array []interface{}, ok bool) { + switch context := context.(type) { + case []interface{}: + array = context + ok = true + default: + array = make([]interface{}, 0) + ok = false + } + return +} + +func writeMap(context interface{}, paths []string, value interface{}) interface{} { + log.Debugf("writeMap with path %v for %v to set value %v\n", paths, context, value) + + mapSlice := getMapSlice(context) + + if len(paths) == 0 { + return context + } + + children := entriesInSlice(mapSlice, paths[0]) + + if len(children) == 0 && paths[0] == "*" { + log.Debugf("\tNo matches, return map as is") + return context + } + + if len(children) == 0 { + newChild := yaml.MapItem{Key: paths[0]} + mapSlice = append(mapSlice, newChild) + children = entriesInSlice(mapSlice, paths[0]) + log.Debugf("\tAppended child at %v for mapSlice %v\n", paths[0], mapSlice) + } + + remainingPaths := paths[1:] + for _, child := range children { + child.Value = 
updatedChildValue(child.Value, remainingPaths, value)
+	}
+	log.Debugf("\tReturning mapSlice %v\n", mapSlice)
+	return mapSlice
+}
+
+func updatedChildValue(child interface{}, remainingPaths []string, value interface{}) interface{} {
+	if len(remainingPaths) == 0 {
+		return value
+	}
+	log.Debugf("updatedChildValue for child %v with path %v to set value %v", child, remainingPaths, value)
+	log.Debugf("type of child is %v", reflect.TypeOf(child))
+
+	switch child := child.(type) {
+	case nil:
+		if remainingPaths[0] == "+" || remainingPaths[0] == "*" {
+			return writeArray(child, remainingPaths, value)
+		}
+	case []interface{}:
+		_, nextIndexErr := strconv.ParseInt(remainingPaths[0], 10, 64)
+		arrayCommand := nextIndexErr == nil || remainingPaths[0] == "+" || remainingPaths[0] == "*"
+		if arrayCommand {
+			return writeArray(child, remainingPaths, value)
+		}
+	}
+	return writeMap(child, remainingPaths, value)
+}
+
+func writeArray(context interface{}, paths []string, value interface{}) []interface{} {
+	log.Debugf("writeArray with path %v for %v to set value %v\n", paths, context, value)
+	array, _ := getArray(context)
+
+	if len(paths) == 0 {
+		return array
+	}
+
+	log.Debugf("\tarray %v\n", array)
+
+	rawIndex := paths[0]
+	remainingPaths := paths[1:]
+	var index int64
+	// the append array indicator
+	if rawIndex == "+" {
+		index = int64(len(array))
+	} else if rawIndex == "*" {
+		for index, oldChild := range array {
+			array[index] = updatedChildValue(oldChild, remainingPaths, value)
+		}
+		return array
+	} else {
+		index, _ = strconv.ParseInt(rawIndex, 10, 64) // nolint
+		// writeArray is only called by updatedChildValue, which has already
+		// validated that the index parses, so this error path is dead code.
+	}
+
+	for index >= int64(len(array)) {
+		array = append(array, nil)
+	}
+	currentChild := array[index]
+
+	log.Debugf("\tcurrentChild %v\n", currentChild)
+
+	array[index] = updatedChildValue(currentChild, remainingPaths, value)
+	log.Debugf("\tReturning array %v\n", array)
+	return array
+}
+
+func readMap(context yaml.MapSlice, head string, tail []string) (interface{}, error) {
+	log.Debugf("readingMap %v with key %v\n", context, head)
+	if head == "*" {
+		return readMapSplat(context, tail)
+	}
+
+	entries := entriesInSlice(context, head)
+	if len(entries) == 1 {
+		return calculateValue(entries[0].Value, tail)
+	} else if len(entries) == 0 {
+		return nil, nil
+	}
+	var errInIdx error
+	values := make([]interface{}, len(entries))
+	for idx, entry := range entries {
+		values[idx], errInIdx = calculateValue(entry.Value, tail)
+		if errInIdx != nil {
+			log.Errorf("Error reading index %v in %v", idx, context)
+			return nil, errInIdx
+		}
+
+	}
+	return values, nil
+}
+
+func readMapSplat(context yaml.MapSlice, tail []string) (interface{}, error) {
+	var newArray = make([]interface{}, len(context))
+	var i = 0
+	for _, entry := range context {
+		if len(tail) > 0 {
+			val, err := recurse(entry.Value, tail[0], tail[1:])
+			if err != nil {
+				return nil, err
+			}
+			newArray[i] = val
+		} else {
+			newArray[i] = entry.Value
+		}
+		i++
+	}
+	return newArray, nil
+}
+
+func recurse(value interface{}, head string, tail []string) (interface{}, error) {
+	switch value := value.(type) {
+	case []interface{}:
+		if head == "*" {
+			return readArraySplat(value, tail)
+		}
+		index, err := strconv.ParseInt(head, 10, 64)
+		if err != nil {
+			return nil, fmt.Errorf("error accessing array: %v", err)
+		}
+		return readArray(value, index, tail)
+	case yaml.MapSlice:
+		return readMap(value, head, tail)
+	default:
+		return nil, nil
+	}
+}
+
+func readArray(array []interface{}, head int64, tail []string) (interface{}, error) {
+	if head >= int64(len(array)) {
+		return nil, nil
+	}
+
+	value := array[head]
+	return calculateValue(value, tail)
+}
+
+func readArraySplat(array []interface{}, tail []string) (interface{}, error) {
+	var newArray = make([]interface{}, len(array))
+	for index, value := range array {
+		val, err := calculateValue(value, tail)
+		if err != nil {
+			return nil, err
+		}
+		newArray[index] = val
+	}
+	return newArray, nil
+}
+
+func calculateValue(value interface{}, tail []string) (interface{}, error) {
+	if len(tail) > 0 {
+		return recurse(value, tail[0], tail[1:])
+	}
+	return value, nil
+}
+
+func deleteMap(context interface{}, paths []string) (yaml.MapSlice, error) {
+	log.Debugf("deleteMap for %v for %v\n", paths, context)
+
+	mapSlice := getMapSlice(context)
+
+	if len(paths) == 0 {
+		return mapSlice, nil
+	}
+
+	var index int
+	var child yaml.MapItem
+	for index, child = range mapSlice {
+		if matchesKey(paths[0], child.Key) {
+			log.Debugf("\tMatched [%v] with [%v] at index %v", paths[0], child.Key, index)
+			var badDelete error
+			mapSlice, badDelete = deleteEntryInMap(mapSlice, child, index, paths)
+			if badDelete != nil {
+				return nil, badDelete
+			}
+		}
+	}
+
+	return mapSlice, nil
+
+}
+
+func deleteEntryInMap(original yaml.MapSlice, child yaml.MapItem, index int, paths []string) (yaml.MapSlice, error) {
+	remainingPaths := paths[1:]
+
+	var newSlice yaml.MapSlice
+	if len(remainingPaths) > 0 {
+		newChild := yaml.MapItem{Key: child.Key}
+		var errorDeleting error
+		newChild.Value, errorDeleting = deleteChildValue(child.Value, remainingPaths)
+		if errorDeleting != nil {
+			return nil, errorDeleting
+		}
+
+		newSlice = make(yaml.MapSlice, len(original))
+		for i := range original {
+			item := original[i]
+			if i == index {
+				item = newChild
+			}
+			newSlice[i] = item
+		}
+	} else {
+		// Delete item from slice at index
+		newSlice = append(original[:index], original[index+1:]...)
+		log.Debugf("\tDeleted item index %d from original", index)
+	}
+
+	log.Debugf("\tReturning new slice %v\n", newSlice)
+	return newSlice, nil
+}
+
+func deleteArraySplat(array []interface{}, tail []string) (interface{}, error) {
+	log.Debugf("deleteArraySplat for %v for %v\n", tail, array)
+	var newArray = make([]interface{}, len(array))
+	for index, value := range array {
+		val, err := deleteChildValue(value, tail)
+		if err != nil {
+			return nil, err
+		}
+		newArray[index] = val
+	}
+	return newArray, nil
+}
+
+func deleteArray(array []interface{}, paths []string, index int64) (interface{}, error) {
+	log.Debugf("deleteArray for %v for %v\n", paths, array)
+
+	if index >= int64(len(array)) {
+		return array, nil
+	}
+
+	remainingPaths := paths[1:]
+	if len(remainingPaths) > 0 {
+		// Recurse into the array element at index
+		var errorDeleting error
+		array[index], errorDeleting = deleteMap(array[index], remainingPaths)
+		if errorDeleting != nil {
+			return nil, errorDeleting
+		}
+
+	} else {
+		// Delete the array element at index
+		array = append(array[:index], array[index+1:]...)
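+		// NB: this append shifts the later elements left within the same
+		// backing array rather than allocating a new slice.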
+ log.Debugf("\tDeleted item index %d from array, leaving %v", index, array) + } + + log.Debugf("\tReturning array: %v\n", array) + return array, nil +} + +func deleteChildValue(child interface{}, remainingPaths []string) (interface{}, error) { + log.Debugf("deleteChildValue for %v for %v\n", remainingPaths, child) + var head = remainingPaths[0] + var tail = remainingPaths[1:] + switch child := child.(type) { + case yaml.MapSlice: + return deleteMap(child, remainingPaths) + case []interface{}: + if head == "*" { + return deleteArraySplat(child, tail) + } + index, err := strconv.ParseInt(head, 10, 64) + if err != nil { + return nil, fmt.Errorf("error accessing array: %v", err) + } + return deleteArray(child, remainingPaths, index) + } + return child, nil +} diff --git a/vendor/github.com/mikefarah/yq/v2/go.mod b/vendor/github.com/mikefarah/yq/v2/go.mod new file mode 100644 index 000000000..0b008769e --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/go.mod @@ -0,0 +1,12 @@ +module github.com/mikefarah/yq/v2 + +require ( + github.com/mikefarah/yaml/v2 v2.4.0 + github.com/pkg/errors v0.8.1 + github.com/spf13/cobra v0.0.5 + golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0 // indirect + gopkg.in/imdario/mergo.v0 v0.3.7 + gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 +) + +go 1.13 diff --git a/vendor/github.com/mikefarah/yq/v2/go.sum b/vendor/github.com/mikefarah/yq/v2/go.sum new file mode 100644 index 000000000..40ff65e55 --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/go.sum @@ -0,0 +1,50 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mikefarah/yaml/v2 v2.4.0 h1:eYqfooY0BnvKTJxr7+ABJs13n3dg9n347GScDaU2Lww= +github.com/mikefarah/yaml/v2 v2.4.0/go.mod h1:ahVqZF4n1W4NqwvVnZzC4es67xsW9uR/RRf2RRxieJU= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0 h1:s5lp4ug7qHzUccgyFdjsX7OZDzHXRaePrF3B3vmUiuM= +golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/imdario/mergo.v0 v0.3.7 h1:QDotlIZtaO/p+Um0ok18HRTpq5i5/SAk/qprsor+9c8= +gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= +gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 h1:6D+BvnJ/j6e222UW8s2qTSe3wGBtvo0MbVQG/c5k8RE= +gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/mikefarah/yq/v2/json_converter.go b/vendor/github.com/mikefarah/yq/v2/json_converter.go new file mode 100644 index 000000000..10e152b9d --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/json_converter.go @@ -0,0 +1,44 @@ +package main + +import ( + "encoding/json" + "fmt" + "strconv" + + yaml "github.com/mikefarah/yaml/v2" +) + +func jsonToString(context interface{}) (string, error) { + out, err := json.Marshal(toJSON(context)) + if err != nil { + return "", fmt.Errorf("error printing yaml as json: %v", err) + } + return string(out), nil +} + +func toJSON(context interface{}) interface{} { + switch context := context.(type) { + case []interface{}: + oldArray := context + newArray := make([]interface{}, 
len(oldArray)) + for index, value := range oldArray { + newArray[index] = toJSON(value) + } + return newArray + case yaml.MapSlice: + oldMap := context + newMap := make(map[string]interface{}) + for _, entry := range oldMap { + if str, ok := entry.Key.(string); ok { + newMap[str] = toJSON(entry.Value) + } else if i, ok := entry.Key.(int); ok { + newMap[strconv.Itoa(i)] = toJSON(entry.Value) + } else if b, ok := entry.Key.(bool); ok { + newMap[strconv.FormatBool(b)] = toJSON(entry.Value) + } + } + return newMap + default: + return context + } +} diff --git a/vendor/github.com/mikefarah/yq/v2/merge.go b/vendor/github.com/mikefarah/yq/v2/merge.go new file mode 100644 index 000000000..e80bce364 --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/merge.go @@ -0,0 +1,12 @@ +package main + +import mergo "gopkg.in/imdario/mergo.v0" + +func merge(dst interface{}, src interface{}, overwrite bool, append bool) error { + if overwrite { + return mergo.Merge(dst, src, mergo.WithOverride) + } else if append { + return mergo.Merge(dst, src, mergo.WithAppendSlice) + } + return mergo.Merge(dst, src) +} diff --git a/vendor/github.com/mikefarah/yq/v2/mkdocs.yml b/vendor/github.com/mikefarah/yq/v2/mkdocs.yml new file mode 100644 index 000000000..99164cbf5 --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/mkdocs.yml @@ -0,0 +1,28 @@ +docs_dir: mkdocs +site_dir: docs +site_name: Yq +theme: 'material' +pages: + - Install: index.md + - Read: read.md + - Write/Update: write.md + - Prefix: prefix.md + - Delete: delete.md + - Create: create.md + - Convert: convert.md + - Merge: merge.md +repo_name: 'mikefarah/yq' +repo_url: 'https://github.com/mikefarah/yq' + +extra: + social: + - type: 'github' + link: 'https://github.com/mikefarah' + - type: 'linkedin' + link: 'https://www.linkedin.com/in/mike-farah-b5a75b2/' + +markdown_extensions: + - markdown_include.include: + base_path: mkdocs + - toc: + permalink: True diff --git a/vendor/github.com/mikefarah/yq/v2/path_parser.go b/vendor/github.com/mikefarah/yq/v2/path_parser.go new file mode 100644 index 000000000..0eed4a49d --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/path_parser.go @@ -0,0 +1,55 @@ +package main + +func parsePath(path string) []string { + return parsePathAccum([]string{}, path) +} + +func parsePathAccum(paths []string, remaining string) []string { + head, tail := nextYamlPath(remaining) + if tail == "" { + return append(paths, head) + } + return parsePathAccum(append(paths, head), tail) +} + +func nextYamlPath(path string) (pathElement string, remaining string) { + switch path[0] { + case '[': + // e.g [0].blah.cat -> we need to return "0" and "blah.cat" + return search(path[1:], []uint8{']'}, true) + case '"': + // e.g "a.b".blah.cat -> we need to return "a.b" and "blah.cat" + return search(path[1:], []uint8{'"'}, true) + default: + // e.g "a.blah.cat" -> return "a" and "blah.cat" + return search(path[0:], []uint8{'.', '['}, false) + } +} + +func search(path string, matchingChars []uint8, skipNext bool) (pathElement string, remaining string) { + for i := 0; i < len(path); i++ { + var char = path[i] + if contains(matchingChars, char) { + var remainingStart = i + 1 + if skipNext { + remainingStart = remainingStart + 1 + } else if !skipNext && char != '.' 
{
+				remainingStart = i
+			}
+			if remainingStart > len(path) {
+				remainingStart = len(path)
+			}
+			return path[0:i], path[remainingStart:]
+		}
+	}
+	return path, ""
+}
+
+func contains(matchingChars []uint8, candidate uint8) bool {
+	for _, a := range matchingChars {
+		if a == candidate {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/mikefarah/yq/v2/release_instructions.txt b/vendor/github.com/mikefarah/yq/v2/release_instructions.txt
new file mode 100644
index 000000000..07b005f7d
--- /dev/null
+++ b/vendor/github.com/mikefarah/yq/v2/release_instructions.txt
@@ -0,0 +1,46 @@
+- increment version in version.go
+- increment version in snapcraft.yaml
+- commit
+- tag git with same version number
+  - be sure to also tag with 'v' for gopkg.in
+- make sure local build passes
+- push tag to git
+  - git push --tags
+- make local xcompile (builds binaries for all platforms)
+
+- git release
+  ./scripts/publish.sh
+
+- snapcraft
+  - will auto create a candidate, test it works then promote
+  - see https://build.snapcraft.io/user/mikefarah/yq
+
+  sudo snap remove yq
+  sudo snap install --edge yq
+
+  then use the UI (https://snapcraft.io/yq/release)
+
+- go get
+  - update the readme instructions
+
+- brew
+  - brew bump-formula-pr --url=https://github.com/mikefarah/yq/archive/2.2.0.tar.gz yq
+  - if that fails with random ruby errors try:
+    - clearing out the gems rm -rf .gem/ruby/2.3.0
+    - export HOMEBREW_FORCE_VENDOR_RUBY=1
+
+- docker
+  - build and push latest and new version tag
+  - docker build . -t mikefarah/yq:latest -t mikefarah/yq:VERSION
+
+- debian package
+  - execute
+    ```dch -i```
+  - fill debian/changelog with changes from last version
+  - build the package sources
+    ```debuild -i -I -S -sa```
+    (signing with gpg key is required in order to put it to ppa)
+  - put to PPA
+    ```dput ppa:<repository> ../yq_<version>_source.changes```
+    (current distro repository is ppa:rmescandon/yq. In case that a new version
+    is released, please contact rmescandon@gmail.com to bump debian package)
\ No newline at end of file
diff --git a/vendor/github.com/mikefarah/yq/v2/test.yml b/vendor/github.com/mikefarah/yq/v2/test.yml
new file mode 100644
index 000000000..71f1098c6
--- /dev/null
+++ b/vendor/github.com/mikefarah/yq/v2/test.yml
@@ -0,0 +1,2 @@
+a: apple
+c: cat
diff --git a/vendor/github.com/mikefarah/yq/v2/version.go b/vendor/github.com/mikefarah/yq/v2/version.go
new file mode 100644
index 000000000..eca405f09
--- /dev/null
+++ b/vendor/github.com/mikefarah/yq/v2/version.go
@@ -0,0 +1,49 @@
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// The git commit that was compiled. This will be filled in by the linker via -ldflags.
+var (
+	GitCommit string
+	GitDescribe string
+
+	// Version is the main version number that is being run at the moment.
+	Version = "2.4.1"
+
+	// VersionPrerelease is a pre-release marker for the version. If this is "" (empty string)
+	// then it means that it is a final release. Otherwise, this is a pre-release
+	// such as "dev" (in development), "beta", "rc1", etc.
+	VersionPrerelease = ""
+)
+
+// ProductName is the name of the product
+const ProductName = "yq"
+
+// GetVersionDisplay composes the parts of the version in a way that's suitable
+// for displaying to humans.
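+// For example: "yq version 2.4.1", or "yq version 2.4.1-dev (abc1234)" when a
+// pre-release marker and commit are set.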
+func GetVersionDisplay() string { + return fmt.Sprintf("%s version %s\n", ProductName, getHumanVersion()) +} + +func getHumanVersion() string { + version := Version + if GitDescribe != "" { + version = GitDescribe + } + + release := VersionPrerelease + if release != "" { + if !strings.Contains(version, release) { + version += fmt.Sprintf("-%s", release) + } + if GitCommit != "" { + version += fmt.Sprintf(" (%s)", GitCommit) + } + } + + // Strip off any single quotes added by the git information. + return strings.Replace(version, "'", "", -1) +} diff --git a/vendor/github.com/mikefarah/yq/v2/yq.go b/vendor/github.com/mikefarah/yq/v2/yq.go new file mode 100644 index 000000000..c59128f1e --- /dev/null +++ b/vendor/github.com/mikefarah/yq/v2/yq.go @@ -0,0 +1,709 @@ +package main + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "strconv" + "strings" + + errors "github.com/pkg/errors" + + yaml "github.com/mikefarah/yaml/v2" + "github.com/spf13/cobra" + logging "gopkg.in/op/go-logging.v1" +) + +var trimOutput = true +var writeInplace = false +var writeScript = "" +var outputToJSON = false +var overwriteFlag = false +var allowEmptyFlag = false +var appendFlag = false +var verbose = false +var version = false +var docIndex = "0" +var log = logging.MustGetLogger("yq") + +func main() { + cmd := newCommandCLI() + if err := cmd.Execute(); err != nil { + log.Error(err.Error()) + os.Exit(1) + } +} + +func newCommandCLI() *cobra.Command { + yaml.DefaultMapType = reflect.TypeOf(yaml.MapSlice{}) + var rootCmd = &cobra.Command{ + Use: "yq", + Short: "yq is a lightweight and portable command-line YAML processor.", + Long: `yq is a lightweight and portable command-line YAML processor. It aims to be the jq or sed of yaml files.`, + RunE: func(cmd *cobra.Command, args []string) error { + if version { + cmd.Print(GetVersionDisplay()) + return nil + } + cmd.Println(cmd.UsageString()) + + return nil + }, + PersistentPreRun: func(cmd *cobra.Command, args []string) { + var format = logging.MustStringFormatter( + `%{color}%{time:15:04:05} %{shortfunc} [%{level:.4s}]%{color:reset} %{message}`, + ) + var backend = logging.AddModuleLevel( + logging.NewBackendFormatter(logging.NewLogBackend(os.Stderr, "", 0), format)) + + if verbose { + backend.SetLevel(logging.DEBUG, "") + } else { + backend.SetLevel(logging.ERROR, "") + } + + logging.SetBackend(backend) + }, + } + + rootCmd.PersistentFlags().BoolVarP(&trimOutput, "trim", "t", true, "trim yaml output") + rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "verbose mode") + rootCmd.Flags().BoolVarP(&version, "version", "V", false, "Print version information and quit") + + rootCmd.AddCommand( + createReadCmd(), + createWriteCmd(), + createPrefixCmd(), + createDeleteCmd(), + createNewCmd(), + createMergeCmd(), + ) + rootCmd.SetOutput(os.Stdout) + + return rootCmd +} + +func createReadCmd() *cobra.Command { + var cmdRead = &cobra.Command{ + Use: "read [yaml_file] [path]", + Aliases: []string{"r"}, + Short: "yq r [--doc/-d index] sample.yaml a.b.c", + Example: ` +yq read things.yaml a.b.c +yq r - a.b.c (reads from stdin) +yq r things.yaml a.*.c +yq r -d1 things.yaml a.array[0].blah +yq r things.yaml a.array[*].blah +yq r -- things.yaml --key-starting-with-dashes + `, + Long: "Outputs the value of the given path in the yaml file to STDOUT", + RunE: readProperty, + } + cmdRead.PersistentFlags().StringVarP(&docIndex, "doc", "d", "0", "process document index number (0 based, * for all documents)") + 
cmdRead.PersistentFlags().BoolVarP(&outputToJSON, "tojson", "j", false, "output as json")
+	// The -j/--tojson flag re-marshals the matched node via encoding/json
+	// (see json_converter.go) instead of printing YAML.
+	return cmdRead
+}
+
+func createWriteCmd() *cobra.Command {
+	var cmdWrite = &cobra.Command{
+		Use:     "write [yaml_file] [path] [value]",
+		Aliases: []string{"w"},
+		Short:   "yq w [--inplace/-i] [--script/-s script_file] [--doc/-d index] sample.yaml a.b.c newValue",
+		Example: `
+yq write things.yaml a.b.c cat
+yq write --inplace -- things.yaml a.b.c --cat
+yq w -i things.yaml a.b.c cat
+yq w --script update_script.yaml things.yaml
+yq w -i -s update_script.yaml things.yaml
+yq w --doc 2 things.yaml a.b.d[+] foo
+yq w -d2 things.yaml a.b.d[+] foo
+	`,
+		Long: `Updates the yaml file w.r.t the given path and value.
+Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead.
+
+Appending a value to an array adds the value to the end of the array.
+
+Update Scripts:
+Note that you can give an update script to perform more sophisticated updates. Update script
+format is a yaml map where the key is the path and the value is, well, the value. e.g.:
+---
+a.b.c: true
+a.b.e:
+  - name: bob
+`,
+		RunE: writeProperty,
+	}
+	cmdWrite.PersistentFlags().BoolVarP(&writeInplace, "inplace", "i", false, "update the yaml file inplace")
+	cmdWrite.PersistentFlags().StringVarP(&writeScript, "script", "s", "", "yaml script for updating yaml")
+	cmdWrite.PersistentFlags().StringVarP(&docIndex, "doc", "d", "0", "process document index number (0 based, * for all documents)")
+	return cmdWrite
+}
+
+func createPrefixCmd() *cobra.Command {
+	var cmdWrite = &cobra.Command{
+		Use:     "prefix [yaml_file] [path]",
+		Aliases: []string{"p"},
+		Short:   "yq p [--inplace/-i] [--doc/-d index] sample.yaml a.b.c",
+		Example: `
+yq prefix things.yaml a.b.c
+yq prefix --inplace things.yaml a.b.c
+yq prefix --inplace -- things.yaml --key-starting-with-dash
+yq p -i things.yaml a.b.c
+yq p --doc 2 things.yaml a.b.d
+yq p -d2 things.yaml a.b.d
+	`,
+		Long: `Prefixes the document(s) in the yaml file with the given path.
+Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead.
+`,
+		RunE: prefixProperty,
+	}
+	cmdWrite.PersistentFlags().BoolVarP(&writeInplace, "inplace", "i", false, "update the yaml file inplace")
+	cmdWrite.PersistentFlags().StringVarP(&docIndex, "doc", "d", "0", "process document index number (0 based, * for all documents)")
+	return cmdWrite
+}
+
+func createDeleteCmd() *cobra.Command {
+	var cmdDelete = &cobra.Command{
+		Use:     "delete [yaml_file] [path]",
+		Aliases: []string{"d"},
+		Short:   "yq d [--inplace/-i] [--doc/-d index] sample.yaml a.b.c",
+		Example: `
+yq delete things.yaml a.b.c
+yq delete --inplace things.yaml a.b.c
+yq delete --inplace -- things.yaml --key-starting-with-dash
+yq d -i things.yaml a.b.c
+yq d things.yaml a.b.c
+	`,
+		Long: `Deletes the given path from the YAML file.
+Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead.
+`,
+		RunE: deleteProperty,
+	}
+	cmdDelete.PersistentFlags().BoolVarP(&writeInplace, "inplace", "i", false, "update the yaml file inplace")
+	cmdDelete.PersistentFlags().StringVarP(&docIndex, "doc", "d", "0", "process document index number (0 based, * for all documents)")
+	return cmdDelete
+}
+
+func createNewCmd() *cobra.Command {
+	var cmdNew = &cobra.Command{
+		Use:     "new [path] [value]",
+		Aliases: []string{"n"},
+		Short:   "yq n [--script/-s script_file] a.b.c newValue",
+		Example: `
+yq new a.b.c cat
+yq n a.b.c cat
+yq n -- --key-starting-with-dash cat
+yq n --script create_script.yaml
+	`,
+		Long: `Creates a new yaml w.r.t the given path and value.
+Outputs to STDOUT
+
+Create Scripts:
+Note that you can give a create script to build a more sophisticated yaml document. This follows the same format as the update script.
+`,
+		RunE: newProperty,
+	}
+	cmdNew.PersistentFlags().StringVarP(&writeScript, "script", "s", "", "yaml script for updating yaml")
+	return cmdNew
+}
+
+func createMergeCmd() *cobra.Command {
+	var cmdMerge = &cobra.Command{
+		Use:     "merge [initial_yaml_file] [additional_yaml_file]...",
+		Aliases: []string{"m"},
+		Short:   "yq m [--inplace/-i] [--doc/-d index] [--overwrite/-x] [--append/-a] sample.yaml sample2.yaml",
+		Example: `
+yq merge things.yaml other.yaml
+yq merge --inplace things.yaml other.yaml
+yq m -i things.yaml other.yaml
+yq m --overwrite things.yaml other.yaml
+yq m -i -x things.yaml other.yaml
+yq m -i -a things.yaml other.yaml
+	`,
+		Long: `Updates the yaml file by adding/updating the path(s) and value(s) from additional yaml file(s).
+Outputs to STDOUT unless the inplace flag is used, in which case the file is updated instead.
+
+If the overwrite flag is set then existing values will be overwritten using the values from each additional yaml file.
+If the append flag is set then existing arrays will be merged with the arrays from each additional yaml file.
+
+Note that if you set both flags only overwrite will take effect.
+`,
+		RunE: mergeProperties,
+	}
+	cmdMerge.PersistentFlags().BoolVarP(&writeInplace, "inplace", "i", false, "update the yaml file inplace")
+	cmdMerge.PersistentFlags().BoolVarP(&overwriteFlag, "overwrite", "x", false, "update the yaml file by overwriting existing values")
+	cmdMerge.PersistentFlags().BoolVarP(&appendFlag, "append", "a", false, "update the yaml file by appending array values")
+	cmdMerge.PersistentFlags().BoolVarP(&allowEmptyFlag, "allow-empty", "e", false, "allow empty yaml files")
+	cmdMerge.PersistentFlags().StringVarP(&docIndex, "doc", "d", "0", "process document index number (0 based, * for all documents)")
+	return cmdMerge
+}
+
+func readProperty(cmd *cobra.Command, args []string) error {
+	var path = ""
+
+	if len(args) < 1 {
+		return errors.New("Must provide filename")
+	} else if len(args) > 1 {
+		path = args[1]
+	}
+
+	var updateAll, docIndexInt, errorParsingDocIndex = parseDocumentIndex()
+	if errorParsingDocIndex != nil {
+		return errorParsingDocIndex
+	}
+	var mappedDocs []interface{}
+	var dataBucket interface{}
+	var currentIndex = 0
+	var errorReadingStream = readStream(args[0], func(decoder *yaml.Decoder) error {
+		for {
+			errorReading := decoder.Decode(&dataBucket)
+			if errorReading == io.EOF {
+				log.Debugf("done %v / %v", currentIndex, docIndexInt)
+				if !updateAll && currentIndex <= docIndexInt {
+					return fmt.Errorf("asked to process document index %v but there are only %v document(s)", docIndex, currentIndex)
+				}
+				return nil
+			}
+			log.Debugf("processing %v - requested index %v", currentIndex, docIndexInt)
+			if updateAll || currentIndex == docIndexInt {
+				log.Debugf("reading %v in index %v", path, currentIndex)
+				mappedDoc, errorParsing := readPath(dataBucket, path)
+				log.Debugf("%v", mappedDoc)
+				if errorParsing != nil {
+					return errors.Wrapf(errorParsing, "Error reading path in document index %v", currentIndex)
+				}
+				mappedDocs = append(mappedDocs, mappedDoc)
+			}
+			currentIndex = currentIndex + 1
+		}
+	})
+
+	if errorReadingStream != nil {
+		return errorReadingStream
+	}
+
+	if !updateAll {
+		dataBucket = mappedDocs[0]
+	} else {
+		dataBucket = mappedDocs
+	}
+
+	dataStr, err := toString(dataBucket)
+	if err != nil {
+		return err
+	}
+	cmd.Println(dataStr)
+	return nil
+}
+
+func readPath(dataBucket interface{}, path string) (interface{}, error) {
+	if path == "" {
+		log.Debug("no path")
+		return dataBucket, nil
+	}
+	var paths = parsePath(path)
+	return recurse(dataBucket, paths[0], paths[1:])
+}
+
+func newProperty(cmd *cobra.Command, args []string) error {
+	updatedData, err := newYaml(args)
+	if err != nil {
+		return err
+	}
+	dataStr, err := toString(updatedData)
+	if err != nil {
+		return err
+	}
+	cmd.Println(dataStr)
+	return nil
+}
+
+func newYaml(args []string) (interface{}, error) {
+	var writeCommands, writeCommandsError = readWriteCommands(args, 2, "Must provide <path_to_update> <value>")
+	if writeCommandsError != nil {
+		return nil, writeCommandsError
+	}
+
+	var dataBucket interface{}
+	var isArray = strings.HasPrefix(writeCommands[0].Key.(string), "[")
+	if isArray {
+		dataBucket = make([]interface{}, 0)
+	} else {
+		dataBucket = make(yaml.MapSlice, 0)
+	}
+
+	for _, entry := range writeCommands {
+		path := entry.Key.(string)
+		value := entry.Value
+		log.Debugf("setting %v to %v", path, value)
+		var paths = parsePath(path)
+		dataBucket = updatedChildValue(dataBucket, paths, value)
+	}
+
+	return dataBucket, nil
+}
+
+func parseDocumentIndex() (bool, int, error) {
+	if docIndex == "*" {
+		return true, -1, nil
+	}
+	docIndexInt64, err :=
strconv.ParseInt(docIndex, 10, 32)
+	if err != nil {
+		return false, -1, errors.Wrapf(err, "Document index %v is not an integer or *", docIndex)
+	}
+	return false, int(docIndexInt64), nil
+}
+
+type updateDataFn func(dataBucket interface{}, currentIndex int) (interface{}, error)
+
+func mapYamlDecoder(updateData updateDataFn, encoder *yaml.Encoder) yamlDecoderFn {
+	return func(decoder *yaml.Decoder) error {
+		var dataBucket interface{}
+		var errorReading error
+		var errorWriting error
+		var errorUpdating error
+		var currentIndex = 0
+
+		var updateAll, docIndexInt, errorParsingDocIndex = parseDocumentIndex()
+		if errorParsingDocIndex != nil {
+			return errorParsingDocIndex
+		}
+
+		for {
+			log.Debugf("Read doc %v", currentIndex)
+			errorReading = decoder.Decode(&dataBucket)
+
+			if errorReading == io.EOF {
+				if !updateAll && currentIndex <= docIndexInt {
+					return fmt.Errorf("asked to process document index %v but there are only %v document(s)", docIndex, currentIndex)
+				}
+				return nil
+			} else if errorReading != nil {
+				return errors.Wrapf(errorReading, "Error reading document at index %v, %v", currentIndex, errorReading)
+			}
+			dataBucket, errorUpdating = updateData(dataBucket, currentIndex)
+			if errorUpdating != nil {
+				return errors.Wrapf(errorUpdating, "Error updating document at index %v", currentIndex)
+			}
+
+			errorWriting = encoder.Encode(dataBucket)
+
+			if errorWriting != nil {
+				return errors.Wrapf(errorWriting, "Error writing document at index %v, %v", currentIndex, errorWriting)
+			}
+			currentIndex = currentIndex + 1
+		}
+	}
+}
+
+func writeProperty(cmd *cobra.Command, args []string) error {
+	var writeCommands, writeCommandsError = readWriteCommands(args, 3, "Must provide <filename> <path_to_update> <value>")
+	if writeCommandsError != nil {
+		return writeCommandsError
+	}
+	var updateAll, docIndexInt, errorParsingDocIndex = parseDocumentIndex()
+	if errorParsingDocIndex != nil {
+		return errorParsingDocIndex
+	}
+
+	var updateData = func(dataBucket interface{}, currentIndex int) (interface{}, error) {
+		if updateAll || currentIndex == docIndexInt {
+			log.Debugf("Updating doc %v", currentIndex)
+			for _, entry := range writeCommands {
+				path := entry.Key.(string)
+				value := entry.Value
+				log.Debugf("setting %v to %v", path, value)
+				var paths = parsePath(path)
+				dataBucket = updatedChildValue(dataBucket, paths, value)
+			}
+		}
+		return dataBucket, nil
+	}
+	return readAndUpdate(cmd.OutOrStdout(), args[0], updateData)
+}
+
+func prefixProperty(cmd *cobra.Command, args []string) error {
+	if len(args) != 2 {
+		return errors.New("Must provide <filename> <prefixed_path>")
+	}
+	var updateAll, docIndexInt, errorParsingDocIndex = parseDocumentIndex()
+	if errorParsingDocIndex != nil {
+		return errorParsingDocIndex
+	}
+
+	var paths = parsePath(args[1])
+
+	// Inverse order
+	for i := len(paths)/2 - 1; i >= 0; i-- {
+		opp := len(paths) - 1 - i
+		paths[i], paths[opp] = paths[opp], paths[i]
+	}
+
+	var updateData = func(dataBucket interface{}, currentIndex int) (interface{}, error) {
+
+		if updateAll || currentIndex == docIndexInt {
+			log.Debugf("Prefixing %v to doc %v", paths, currentIndex)
+			var mapDataBucket = dataBucket
+			for _, key := range paths {
+				singlePath := []string{key}
+				mapDataBucket = updatedChildValue(nil, singlePath, mapDataBucket)
+			}
+			return mapDataBucket, nil
+		}
+		return dataBucket, nil
+	}
+	return readAndUpdate(cmd.OutOrStdout(), args[0], updateData)
+}
+
+func readAndUpdate(stdOut io.Writer, inputFile string, updateData updateDataFn) error {
+	var destination io.Writer
+	var destinationName string
+	if writeInplace {
+		info, err
:= os.Stat(inputFile) + if err != nil { + return err + } + tempFile, err := ioutil.TempFile("", "temp") + if err != nil { + return err + } + destinationName = tempFile.Name() + err = os.Chmod(destinationName, info.Mode()) + if err != nil { + return err + } + destination = tempFile + defer func() { + safelyCloseFile(tempFile) + safelyRenameFile(tempFile.Name(), inputFile) + }() + } else { + var writer = bufio.NewWriter(stdOut) + destination = writer + destinationName = "Stdout" + defer safelyFlush(writer) + } + var encoder = yaml.NewEncoder(destination) + log.Debugf("Writing to %v from %v", destinationName, inputFile) + return readStream(inputFile, mapYamlDecoder(updateData, encoder)) +} + +func deleteProperty(cmd *cobra.Command, args []string) error { + if len(args) < 2 { + return errors.New("Must provide <filename> <path_to_delete>") + } + var deletePath = args[1] + var paths = parsePath(deletePath) + var updateAll, docIndexInt, errorParsingDocIndex = parseDocumentIndex() + if errorParsingDocIndex != nil { + return errorParsingDocIndex + } + + var updateData = func(dataBucket interface{}, currentIndex int) (interface{}, error) { + if updateAll || currentIndex == docIndexInt { + log.Debugf("Deleting path in doc %v", currentIndex) + return deleteChildValue(dataBucket, paths) + } + return dataBucket, nil + } + + return readAndUpdate(cmd.OutOrStdout(), args[0], updateData) +} + +func mergeProperties(cmd *cobra.Command, args []string) error { + if len(args) < 2 { + return errors.New("Must provide at least 2 yaml files") + } + var input = args[0] + var filesToMerge = args[1:] + var updateAll, docIndexInt, errorParsingDocIndex = parseDocumentIndex() + if errorParsingDocIndex != nil { + return errorParsingDocIndex + } + + var updateData = func(dataBucket interface{}, currentIndex int) (interface{}, error) { + if updateAll || currentIndex == docIndexInt { + log.Debugf("Merging doc %v", currentIndex) + var mergedData map[interface{}]interface{} + // merge only works for maps, so put everything in a temporary + // map + var mapDataBucket = make(map[interface{}]interface{}) + mapDataBucket["root"] = dataBucket + if err := merge(&mergedData, mapDataBucket, overwriteFlag, appendFlag); err != nil { + return nil, err + } + for _, f := range filesToMerge { + var fileToMerge interface{} + if err := readData(f, 0, &fileToMerge); err != nil { + if allowEmptyFlag && err == io.EOF { + continue + } + return nil, err + } + mapDataBucket["root"] = fileToMerge + if err := merge(&mergedData, mapDataBucket, overwriteFlag, appendFlag); err != nil { + return nil, err + } + } + return mergedData["root"], nil + } + return dataBucket, nil + } + yaml.DefaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + defer func() { yaml.DefaultMapType = reflect.TypeOf(yaml.MapSlice{}) }() + return readAndUpdate(cmd.OutOrStdout(), input, updateData) +} + +func readWriteCommands(args []string, expectedArgs int, badArgsMessage string) (yaml.MapSlice, error) { + var writeCommands yaml.MapSlice + if writeScript != "" { + if err := readData(writeScript, 0, &writeCommands); err != nil { + return nil, err + } + } else if len(args) < expectedArgs { + return nil, errors.New(badArgsMessage) + } else { + writeCommands = make(yaml.MapSlice, 1) + writeCommands[0] = yaml.MapItem{Key: args[expectedArgs-2], Value: parseValue(args[expectedArgs-1])} + } + return writeCommands, nil +} + +func parseValue(argument string) interface{} { + var value, err interface{} + var inQuotes = len(argument) > 0 && argument[0] == '"' + if !inQuotes { + value, err =
strconv.ParseFloat(argument, 64) + if err == nil { + return value + } + value, err = strconv.ParseBool(argument) + if err == nil { + return value + } + if argument == "[]" { + return make([]interface{}, 0) + } + return argument + } + return argument[1 : len(argument)-1] +} + +func toString(context interface{}) (string, error) { + if outputToJSON { + return jsonToString(context) + } + return yamlToString(context) +} + +func yamlToString(context interface{}) (string, error) { + switch context := context.(type) { + case string: + return context, nil + default: + return marshalContext(context) + } +} + +func marshalContext(context interface{}) (string, error) { + out, err := yaml.Marshal(context) + + if err != nil { + return "", errors.Wrap(err, "error printing yaml") + } + + outStr := string(out) + // trim the trailing new line as it's easier for a script to add + // it in if required than to remove it + if trimOutput { + return strings.Trim(outStr, "\n "), nil + } + return outStr, nil +} + +func safelyRenameFile(from string, to string) { + if renameError := os.Rename(from, to); renameError != nil { + log.Debugf("Error renaming from %v to %v, attempting to copy contents", from, to) + log.Debug(renameError.Error()) + // can't do this rename when running in docker to a file targeted in a mounted volume, + // so gracefully degrade to copying the entire contents. + if copyError := copyFileContents(from, to); copyError != nil { + log.Errorf("Failed copying from %v to %v", from, to) + log.Error(copyError.Error()) + } else { + removeErr := os.Remove(from) + if removeErr != nil { + log.Errorf("failed removing original file: %s", from) + } + } + } +} + +// thanks https://stackoverflow.com/questions/21060945/simple-way-to-copy-a-file-in-golang +func copyFileContents(src, dst string) (err error) { + in, err := os.Open(src) // nolint gosec + if err != nil { + return err + } + defer safelyCloseFile(in) + out, err := os.Create(dst) + if err != nil { + return err + } + defer safelyCloseFile(out) + if _, err = io.Copy(out, in); err != nil { + return err + } + return out.Sync() +} + +func safelyFlush(writer *bufio.Writer) { + if err := writer.Flush(); err != nil { + log.Error("Error flushing writer!") + log.Error(err.Error()) + } + +} +func safelyCloseFile(file *os.File) { + err := file.Close() + if err != nil { + log.Error("Error closing file!") + log.Error(err.Error()) + } +} + +type yamlDecoderFn func(*yaml.Decoder) error + +func readStream(filename string, yamlDecoder yamlDecoderFn) error { + if filename == "" { + return errors.New("Must provide filename") + } + + var stream io.Reader + if filename == "-" { + stream = bufio.NewReader(os.Stdin) + } else { + file, err := os.Open(filename) // nolint gosec + if err != nil { + return err + } + defer safelyCloseFile(file) + stream = file + } + return yamlDecoder(yaml.NewDecoder(stream)) +} + +func readData(filename string, indexToRead int, parsedData interface{}) error { + return readStream(filename, func(decoder *yaml.Decoder) error { + for currentIndex := 0; currentIndex < indexToRead; currentIndex++ { + errorSkipping := decoder.Decode(parsedData) + if errorSkipping != nil { + return errors.Wrapf(errorSkipping, "Error processing document at index %v, %v", currentIndex, errorSkipping) + } + } + return decoder.Decode(parsedData) + }) +} diff --git a/vendor/golang.org/x/lint/.travis.yml b/vendor/golang.org/x/lint/.travis.yml new file mode 100644 index 000000000..50553ebd0 --- /dev/null +++ b/vendor/golang.org/x/lint/.travis.yml @@ -0,0 +1,19 @@ +sudo: false
+language: go +go: + - 1.10.x + - 1.11.x + - master + +go_import_path: golang.org/x/lint + +install: + - go get -t -v ./... + +script: + - go test -v -race ./... + +matrix: + allow_failures: + - go: master + fast_finish: true diff --git a/vendor/golang.org/x/lint/CONTRIBUTING.md b/vendor/golang.org/x/lint/CONTRIBUTING.md new file mode 100644 index 000000000..1fadda62d --- /dev/null +++ b/vendor/golang.org/x/lint/CONTRIBUTING.md @@ -0,0 +1,15 @@ +# Contributing to Golint + +## Before filing an issue: + +### Are you having trouble building golint? + +Check you have the latest version of its dependencies. Run +``` +go get -u golang.org/x/lint/golint +``` +If you still have problems, consider searching for existing issues before filing a new issue. + +## Before sending a pull request: + +Have you understood the purpose of golint? Make sure to carefully read `README`. diff --git a/vendor/golang.org/x/lint/LICENSE b/vendor/golang.org/x/lint/LICENSE new file mode 100644 index 000000000..65d761bc9 --- /dev/null +++ b/vendor/golang.org/x/lint/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/lint/README.md b/vendor/golang.org/x/lint/README.md new file mode 100644 index 000000000..4968b13ae --- /dev/null +++ b/vendor/golang.org/x/lint/README.md @@ -0,0 +1,88 @@ +Golint is a linter for Go source code. + +[![Build Status](https://travis-ci.org/golang/lint.svg?branch=master)](https://travis-ci.org/golang/lint) + +## Installation + +Golint requires a +[supported release of Go](https://golang.org/doc/devel/release.html#policy). + + go get -u golang.org/x/lint/golint + +To find out where `golint` was installed you can run `go list -f {{.Target}} golang.org/x/lint/golint`. For `golint` to be used globally add that directory to the `$PATH` environment setting. + +## Usage + +Invoke `golint` with one or more filenames, directories, or packages named +by its import path. 
Golint uses the same +[import path syntax](https://golang.org/cmd/go/#hdr-Import_path_syntax) as +the `go` command and therefore +also supports relative import paths like `./...`. Additionally the `...` +wildcard can be used as suffix on relative and absolute file paths to recurse +into them. + +The output of this tool is a list of suggestions in Vim quickfix format, +which is accepted by lots of different editors. + +## Purpose + +Golint differs from gofmt. Gofmt reformats Go source code, whereas +golint prints out style mistakes. + +Golint differs from govet. Govet is concerned with correctness, whereas +golint is concerned with coding style. Golint is in use at Google, and it +seeks to match the accepted style of the open source Go project. + +The suggestions made by golint are exactly that: suggestions. +Golint is not perfect, and has both false positives and false negatives. +Do not treat its output as a gold standard. We will not be adding pragmas +or other knobs to suppress specific warnings, so do not expect or require +code to be completely "lint-free". +In short, this tool is not, and will never be, trustworthy enough for its +suggestions to be enforced automatically, for example as part of a build process. +Golint makes suggestions for many of the mechanically checkable items listed in +[Effective Go](https://golang.org/doc/effective_go.html) and the +[CodeReviewComments wiki page](https://golang.org/wiki/CodeReviewComments). + +## Scope + +Golint is meant to carry out the stylistic conventions put forth in +[Effective Go](https://golang.org/doc/effective_go.html) and +[CodeReviewComments](https://golang.org/wiki/CodeReviewComments). +Changes that are not aligned with those documents will not be considered. + +## Contributions + +Contributions to this project are welcome provided they are [in scope](#scope), +though please send mail before starting work on anything major. +Contributors retain their copyright, so we need you to fill out +[a short form](https://developers.google.com/open-source/cla/individual) +before we can accept your contribution. + +## Vim + +Add this to your ~/.vimrc: + + set rtp+=$GOPATH/src/golang.org/x/lint/misc/vim + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running `:Lint` will run golint on the current file and populate the quickfix list. + +Optionally, add this to your `~/.vimrc` to automatically run `golint` on `:w` + + autocmd BufWritePost,FileWritePost *.go execute 'Lint' | cwindow + + +## Emacs + +Add this to your `.emacs` file: + + (add-to-list 'load-path (concat (getenv "GOPATH") "/src/golang.org/x/lint/misc/emacs/")) + (require 'golint) + +If you have multiple entries in your GOPATH, replace `$GOPATH` with the right value. + +Running M-x golint will run golint on the current file. + +For more usage, see [Compilation-Mode](http://www.gnu.org/software/emacs/manual/html_node/emacs/Compilation-Mode.html). 
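Beyond the editor integrations above, the vendored `lint` package can also be driven as a library rather than through the CLI. A minimal sketch against the `Linter` API vendored in this diff; the file name, source snippet, and 0.8 threshold below are illustrative, with the threshold simply mirroring golint's default `min_confidence`:

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/lint"
    )

    func main() {
    	// Hypothetical input: one file containing an undocumented exported function.
    	src := []byte("package demo\n\nfunc Exported() {}\n")

    	l := new(lint.Linter)
    	problems, err := l.Lint("demo.go", src)
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, p := range problems {
    		// Keep only reasonably confident findings, like golint's default.
    		if p.Confidence >= 0.8 {
    			fmt.Printf("%v: %s\n", p.Position, p.Text)
    		}
    	}
    }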
diff --git a/vendor/golang.org/x/lint/go.mod b/vendor/golang.org/x/lint/go.mod new file mode 100644 index 000000000..44179f3a4 --- /dev/null +++ b/vendor/golang.org/x/lint/go.mod @@ -0,0 +1,5 @@ +module golang.org/x/lint + +go 1.11 + +require golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f diff --git a/vendor/golang.org/x/lint/go.sum b/vendor/golang.org/x/lint/go.sum new file mode 100644 index 000000000..539c98a94 --- /dev/null +++ b/vendor/golang.org/x/lint/go.sum @@ -0,0 +1,8 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/lint/golint/golint.go b/vendor/golang.org/x/lint/golint/golint.go new file mode 100644 index 000000000..ac024b6d2 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/golint.go @@ -0,0 +1,159 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// golint lints the Go source files named on its command line. +package main + +import ( + "flag" + "fmt" + "go/build" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "golang.org/x/lint" +) + +var ( + minConfidence = flag.Float64("min_confidence", 0.8, "minimum confidence of a problem to print it") + setExitStatus = flag.Bool("set_exit_status", false, "set exit status to 1 if any issues are found") + suggestions int +) + +func usage() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + fmt.Fprintf(os.Stderr, "\tgolint [flags] # runs on package in current directory\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [packages]\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [directories] # where a '/...' suffix includes all sub-directories\n") + fmt.Fprintf(os.Stderr, "\tgolint [flags] [files] # all must belong to a single package\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() +} + +func main() { + flag.Usage = usage + flag.Parse() + + if flag.NArg() == 0 { + lintDir(".") + } else { + // dirsRun, filesRun, and pkgsRun indicate whether golint is applied to + // directory, file or package targets. The distinction affects which + // checks are run. It is not valid to mix target types.
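+		// Illustrative note (not from upstream): "golint ./..." or "golint ./pkg"
+		// lints directories, "golint foo.go bar.go" lints files, and "golint fmt"
+		// lints a package; mixing them, e.g. "golint ./... foo.go", falls through
+		// to usage() and exits with status 2 below.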
+ var dirsRun, filesRun, pkgsRun int + var args []string + for _, arg := range flag.Args() { + if strings.HasSuffix(arg, "/...") && isDir(arg[:len(arg)-len("/...")]) { + dirsRun = 1 + for _, dirname := range allPackagesInFS(arg) { + args = append(args, dirname) + } + } else if isDir(arg) { + dirsRun = 1 + args = append(args, arg) + } else if exists(arg) { + filesRun = 1 + args = append(args, arg) + } else { + pkgsRun = 1 + args = append(args, arg) + } + } + + if dirsRun+filesRun+pkgsRun != 1 { + usage() + os.Exit(2) + } + switch { + case dirsRun == 1: + for _, dir := range args { + lintDir(dir) + } + case filesRun == 1: + lintFiles(args...) + case pkgsRun == 1: + for _, pkg := range importPaths(args) { + lintPackage(pkg) + } + } + } + + if *setExitStatus && suggestions > 0 { + fmt.Fprintf(os.Stderr, "Found %d lint suggestions; failing.\n", suggestions) + os.Exit(1) + } +} + +func isDir(filename string) bool { + fi, err := os.Stat(filename) + return err == nil && fi.IsDir() +} + +func exists(filename string) bool { + _, err := os.Stat(filename) + return err == nil +} + +func lintFiles(filenames ...string) { + files := make(map[string][]byte) + for _, filename := range filenames { + src, err := ioutil.ReadFile(filename) + if err != nil { + fmt.Fprintln(os.Stderr, err) + continue + } + files[filename] = src + } + + l := new(lint.Linter) + ps, err := l.LintFiles(files) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return + } + for _, p := range ps { + if p.Confidence >= *minConfidence { + fmt.Printf("%v: %s\n", p.Position, p.Text) + suggestions++ + } + } +} + +func lintDir(dirname string) { + pkg, err := build.ImportDir(dirname, 0) + lintImportedPackage(pkg, err) +} + +func lintPackage(pkgname string) { + pkg, err := build.Import(pkgname, ".", 0) + lintImportedPackage(pkg, err) +} + +func lintImportedPackage(pkg *build.Package, err error) { + if err != nil { + if _, nogo := err.(*build.NoGoError); nogo { + // Don't complain if the failure is due to no Go source files. + return + } + fmt.Fprintln(os.Stderr, err) + return + } + + var files []string + files = append(files, pkg.GoFiles...) + files = append(files, pkg.CgoFiles...) + files = append(files, pkg.TestGoFiles...) + if pkg.Dir != "." { + for i, f := range files { + files[i] = filepath.Join(pkg.Dir, f) + } + } + // TODO(dsymonds): Do foo_test too (pkg.XTestGoFiles) + + lintFiles(files...) +} diff --git a/vendor/golang.org/x/lint/golint/import.go b/vendor/golang.org/x/lint/golint/import.go new file mode 100644 index 000000000..2ba9dea77 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/import.go @@ -0,0 +1,309 @@ +package main + +/* + +This file holds a direct copy of the import path matching code of +https://github.com/golang/go/blob/master/src/cmd/go/main.go. It can be +replaced when https://golang.org/issue/8768 is resolved. + +It has been updated to follow upstream changes in a few ways. + +*/ + +import ( + "fmt" + "go/build" + "log" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" +) + +var ( + buildContext = build.Default + goroot = filepath.Clean(runtime.GOROOT()) + gorootSrc = filepath.Join(goroot, "src") +) + +// importPathsNoDotExpansion returns the import paths to use for the given +// command line, but it does no ... expansion. 
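+// Illustrative examples (not from upstream): "all" and "std" expand to package
+// lists via allPackages; on Windows, `\` separators are rewritten to `/`; and
+// arguments are cleaned, so "./foo/" becomes "./foo" while its leading "./" is
+// preserved.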
+func importPathsNoDotExpansion(args []string) []string { + if len(args) == 0 { + return []string{"."} + } + var out []string + for _, a := range args { + // Arguments are supposed to be import paths, but + // as a courtesy to Windows developers, rewrite \ to / + // in command-line arguments. Handles .\... and so on. + if filepath.Separator == '\\' { + a = strings.Replace(a, `\`, `/`, -1) + } + + // Put argument in canonical form, but preserve leading ./. + if strings.HasPrefix(a, "./") { + a = "./" + path.Clean(a) + if a == "./." { + a = "." + } + } else { + a = path.Clean(a) + } + if a == "all" || a == "std" { + out = append(out, allPackages(a)...) + continue + } + out = append(out, a) + } + return out +} + +// importPaths returns the import paths to use for the given command line. +func importPaths(args []string) []string { + args = importPathsNoDotExpansion(args) + var out []string + for _, a := range args { + if strings.Contains(a, "...") { + if build.IsLocalImport(a) { + out = append(out, allPackagesInFS(a)...) + } else { + out = append(out, allPackages(a)...) + } + continue + } + out = append(out, a) + } + return out +} + +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +func matchPattern(pattern string) func(name string) bool { + re := regexp.QuoteMeta(pattern) + re = strings.Replace(re, `\.\.\.`, `.*`, -1) + // Special case: foo/... matches foo too. + if strings.HasSuffix(re, `/.*`) { + re = re[:len(re)-len(`/.*`)] + `(/.*)?` + } + reg := regexp.MustCompile(`^` + re + `$`) + return func(name string) bool { + return reg.MatchString(name) + } +} + +// hasPathPrefix reports whether the path s begins with the +// elements in prefix. +func hasPathPrefix(s, prefix string) bool { + switch { + default: + return false + case len(s) == len(prefix): + return s == prefix + case len(s) > len(prefix): + if prefix != "" && prefix[len(prefix)-1] == '/' { + return strings.HasPrefix(s, prefix) + } + return s[len(prefix)] == '/' && s[:len(prefix)] == prefix + } +} + +// treeCanMatchPattern(pattern)(name) reports whether +// name or children of name can possibly match pattern. +// Pattern is the same limited glob accepted by matchPattern. +func treeCanMatchPattern(pattern string) func(name string) bool { + wildCard := false + if i := strings.Index(pattern, "..."); i >= 0 { + wildCard = true + pattern = pattern[:i] + } + return func(name string) bool { + return len(name) <= len(pattern) && hasPathPrefix(pattern, name) || + wildCard && strings.HasPrefix(name, pattern) + } +} + +// allPackages returns all the packages that can be found +// under the $GOPATH directories and $GOROOT matching pattern. +// The pattern is either "all" (all packages), "std" (standard packages) +// or a path including "...". 
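+// For example (illustrative): allPackages("golang.org/x/...") walks $GOROOT and
+// every $GOPATH source directory and returns the matching packages, printing a
+// warning to stderr when the pattern matches nothing.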
+func allPackages(pattern string) []string { + pkgs := matchPackages(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +func matchPackages(pattern string) []string { + match := func(string) bool { return true } + treeCanMatch := func(string) bool { return true } + if pattern != "all" && pattern != "std" { + match = matchPattern(pattern) + treeCanMatch = treeCanMatchPattern(pattern) + } + + have := map[string]bool{ + "builtin": true, // ignore pseudo-package that exists only for documentation + } + if !buildContext.CgoEnabled { + have["runtime/cgo"] = true // ignore during walk + } + var pkgs []string + + // Commands + cmd := filepath.Join(goroot, "src/cmd") + string(filepath.Separator) + filepath.Walk(cmd, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == cmd { + return nil + } + name := path[len(cmd):] + if !treeCanMatch(name) { + return filepath.SkipDir + } + // Commands are all in cmd/, not in subdirectories. + if strings.Contains(name, string(filepath.Separator)) { + return filepath.SkipDir + } + + // We use, e.g., cmd/gofmt as the pseudo import path for gofmt. + name = "cmd/" + name + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + + for _, src := range buildContext.SrcDirs() { + if (pattern == "std" || pattern == "cmd") && src != gorootSrc { + continue + } + src = filepath.Clean(src) + string(filepath.Separator) + root := src + if pattern == "cmd" { + root += "cmd" + string(filepath.Separator) + } + filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == src { + return nil + } + + // Avoid .foo, _foo, and testdata directory trees. + _, elem := filepath.Split(path) + if strings.HasPrefix(elem, ".") || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := filepath.ToSlash(path[len(src):]) + if pattern == "std" && (strings.Contains(name, ".") || name == "cmd") { + // The name "std" is only the standard library. + // If the name is cmd, it's the root of the command tree. + return filepath.SkipDir + } + if !treeCanMatch(name) { + return filepath.SkipDir + } + if have[name] { + return nil + } + have[name] = true + if !match(name) { + return nil + } + _, err = buildContext.ImportDir(path, 0) + if err != nil { + if _, noGo := err.(*build.NoGoError); noGo { + return nil + } + } + pkgs = append(pkgs, name) + return nil + }) + } + return pkgs +} + +// allPackagesInFS is like allPackages but is passed a pattern +// beginning ./ or ../, meaning it should scan the tree rooted +// at the given directory. There are ... in the pattern too. +func allPackagesInFS(pattern string) []string { + pkgs := matchPackagesInFS(pattern) + if len(pkgs) == 0 { + fmt.Fprintf(os.Stderr, "warning: %q matched no packages\n", pattern) + } + return pkgs +} + +func matchPackagesInFS(pattern string) []string { + // Find directory to begin the scan. + // Could be smarter but this one optimization + // is enough for now, since ... is usually at the + // end of a path. + i := strings.Index(pattern, "...") + dir, _ := path.Split(pattern[:i]) + + // pattern begins with ./ or ../. + // path.Clean will discard the ./ but not the ../. 
+ // We need to preserve the ./ for pattern matching + // and in the returned import paths. + prefix := "" + if strings.HasPrefix(pattern, "./") { + prefix = "./" + } + match := matchPattern(pattern) + + var pkgs []string + filepath.Walk(dir, func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() { + return nil + } + if path == dir { + // filepath.Walk starts at dir and recurses. For the recursive case, + // the path is the result of filepath.Join, which calls filepath.Clean. + // The initial case is not Cleaned, though, so we do this explicitly. + // + // This converts a path like "./io/" to "io". Without this step, running + // "cd $GOROOT/src/pkg; go list ./io/..." would incorrectly skip the io + // package, because prepending the prefix "./" to the unclean path would + // result in "././io", and match("././io") returns false. + path = filepath.Clean(path) + } + + // Avoid .foo, _foo, and testdata directory trees, but do not avoid "." or "..". + _, elem := filepath.Split(path) + dot := strings.HasPrefix(elem, ".") && elem != "." && elem != ".." + if dot || strings.HasPrefix(elem, "_") || elem == "testdata" { + return filepath.SkipDir + } + + name := prefix + filepath.ToSlash(path) + if !match(name) { + return nil + } + if _, err = build.ImportDir(path, 0); err != nil { + if _, noGo := err.(*build.NoGoError); !noGo { + log.Print(err) + } + return nil + } + pkgs = append(pkgs, name) + return nil + }) + return pkgs +} diff --git a/vendor/golang.org/x/lint/golint/importcomment.go b/vendor/golang.org/x/lint/golint/importcomment.go new file mode 100644 index 000000000..d5b32f734 --- /dev/null +++ b/vendor/golang.org/x/lint/golint/importcomment.go @@ -0,0 +1,13 @@ +// Copyright (c) 2018 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// +build go1.12 + +// Require use of the correct import path only for Go 1.12+ users, so +// any breakages coincide with people updating their CI configs or +// whatnot. + +package main // import "golang.org/x/lint/golint" diff --git a/vendor/golang.org/x/lint/lint.go b/vendor/golang.org/x/lint/lint.go new file mode 100644 index 000000000..532a75ad2 --- /dev/null +++ b/vendor/golang.org/x/lint/lint.go @@ -0,0 +1,1614 @@ +// Copyright (c) 2013 The Go Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file or at +// https://developers.google.com/open-source/licenses/bsd. + +// Package lint contains a linter for Go source code. +package lint // import "golang.org/x/lint" + +import ( + "bufio" + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/printer" + "go/token" + "go/types" + "regexp" + "sort" + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/tools/go/ast/astutil" + "golang.org/x/tools/go/gcexportdata" +) + +const styleGuideBase = "https://golang.org/wiki/CodeReviewComments" + +// A Linter lints Go source code. +type Linter struct { +} + +// Problem represents a problem in some source code. 
+type Problem struct { + Position token.Position // position in source file + Text string // the prose that describes the problem + Link string // (optional) the link to the style guide for the problem + Confidence float64 // a value in (0,1] estimating the confidence in this problem's correctness + LineText string // the source line + Category string // a short name for the general category of the problem + + // If the problem has a suggested fix (the minority case), + // ReplacementLine is a full replacement for the relevant line of the source file. + ReplacementLine string +} + +func (p *Problem) String() string { + if p.Link != "" { + return p.Text + "\n\n" + p.Link + } + return p.Text +} + +type byPosition []Problem + +func (p byPosition) Len() int { return len(p) } +func (p byPosition) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p byPosition) Less(i, j int) bool { + pi, pj := p[i].Position, p[j].Position + + if pi.Filename != pj.Filename { + return pi.Filename < pj.Filename + } + if pi.Line != pj.Line { + return pi.Line < pj.Line + } + if pi.Column != pj.Column { + return pi.Column < pj.Column + } + + return p[i].Text < p[j].Text +} + +// Lint lints src. +func (l *Linter) Lint(filename string, src []byte) ([]Problem, error) { + return l.LintFiles(map[string][]byte{filename: src}) +} + +// LintFiles lints a set of files of a single package. +// The argument is a map of filename to source. +func (l *Linter) LintFiles(files map[string][]byte) ([]Problem, error) { + pkg := &pkg{ + fset: token.NewFileSet(), + files: make(map[string]*file), + } + var pkgName string + for filename, src := range files { + if isGenerated(src) { + continue // See issue #239 + } + f, err := parser.ParseFile(pkg.fset, filename, src, parser.ParseComments) + if err != nil { + return nil, err + } + if pkgName == "" { + pkgName = f.Name.Name + } else if f.Name.Name != pkgName { + return nil, fmt.Errorf("%s is in package %s, not %s", filename, f.Name.Name, pkgName) + } + pkg.files[filename] = &file{ + pkg: pkg, + f: f, + fset: pkg.fset, + src: src, + filename: filename, + } + } + if len(pkg.files) == 0 { + return nil, nil + } + return pkg.lint(), nil +} + +var ( + genHdr = []byte("// Code generated ") + genFtr = []byte(" DO NOT EDIT.") +) + +// isGenerated reports whether the source file is generated code +// according to the rules from https://golang.org/s/generatedcode. +func isGenerated(src []byte) bool { + sc := bufio.NewScanner(bytes.NewReader(src)) + for sc.Scan() { + b := sc.Bytes() + if bytes.HasPrefix(b, genHdr) && bytes.HasSuffix(b, genFtr) && len(b) >= len(genHdr)+len(genFtr) { + return true + } + } + return false +} + +// pkg represents a package being linted. +type pkg struct { + fset *token.FileSet + files map[string]*file + + typesPkg *types.Package + typesInfo *types.Info + + // sortable is the set of types in the package that implement sort.Interface. + sortable map[string]bool + // main is whether this is a "main" package. + main bool + + problems []Problem +} + +func (p *pkg) lint() []Problem { + if err := p.typeCheck(); err != nil { + /* TODO(dsymonds): Consider reporting these errors when golint operates on entire packages. + if e, ok := err.(types.Error); ok { + pos := p.fset.Position(e.Pos) + conf := 1.0 + if strings.Contains(e.Msg, "can't find import: ") { + // Golint is probably being run in a context that doesn't support + // typechecking (e.g. package files aren't found), so don't warn about it.
+ conf = 0 + } + if conf > 0 { + p.errorfAt(pos, conf, category("typechecking"), e.Msg) + } + + // TODO(dsymonds): Abort if !e.Soft? + } + */ + } + + p.scanSortable() + p.main = p.isMain() + + for _, f := range p.files { + f.lint() + } + + sort.Sort(byPosition(p.problems)) + + return p.problems +} + +// file represents a file being linted. +type file struct { + pkg *pkg + f *ast.File + fset *token.FileSet + src []byte + filename string +} + +func (f *file) isTest() bool { return strings.HasSuffix(f.filename, "_test.go") } + +func (f *file) lint() { + f.lintPackageComment() + f.lintImports() + f.lintBlankImports() + f.lintExported() + f.lintNames() + f.lintElses() + f.lintRanges() + f.lintErrorf() + f.lintErrors() + f.lintErrorStrings() + f.lintReceiverNames() + f.lintIncDec() + f.lintErrorReturn() + f.lintUnexportedReturn() + f.lintTimeNames() + f.lintContextKeyTypes() + f.lintContextArgs() +} + +type link string +type category string + +// The variadic arguments may start with link and category types, +// and must end with a format string and any arguments. +// It returns the new Problem. +func (f *file) errorf(n ast.Node, confidence float64, args ...interface{}) *Problem { + pos := f.fset.Position(n.Pos()) + if pos.Filename == "" { + pos.Filename = f.filename + } + return f.pkg.errorfAt(pos, confidence, args...) +} + +func (p *pkg) errorfAt(pos token.Position, confidence float64, args ...interface{}) *Problem { + problem := Problem{ + Position: pos, + Confidence: confidence, + } + if pos.Filename != "" { + // The file might not exist in our mapping if a //line directive was encountered. + if f, ok := p.files[pos.Filename]; ok { + problem.LineText = srcLine(f.src, pos) + } + } + +argLoop: + for len(args) > 1 { // always leave at least the format string in args + switch v := args[0].(type) { + case link: + problem.Link = string(v) + case category: + problem.Category = string(v) + default: + break argLoop + } + args = args[1:] + } + + problem.Text = fmt.Sprintf(args[0].(string), args[1:]...) + + p.problems = append(p.problems, problem) + return &p.problems[len(p.problems)-1] +} + +var newImporter = func(fset *token.FileSet) types.ImporterFrom { + return gcexportdata.NewImporter(fset, make(map[string]*types.Package)) +} + +func (p *pkg) typeCheck() error { + config := &types.Config{ + // By setting a no-op error reporter, the type checker does as much work as possible. + Error: func(error) {}, + Importer: newImporter(p.fset), + } + info := &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + } + var anyFile *file + var astFiles []*ast.File + for _, f := range p.files { + anyFile = f + astFiles = append(astFiles, f.f) + } + pkg, err := config.Check(anyFile.f.Name.Name, p.fset, astFiles, info) + // Remember the typechecking info, even if config.Check failed, + // since we will get partial information. + p.typesPkg = pkg + p.typesInfo = info + return err +} + +func (p *pkg) typeOf(expr ast.Expr) types.Type { + if p.typesInfo == nil { + return nil + } + return p.typesInfo.TypeOf(expr) +} + +func (p *pkg) isNamedType(typ types.Type, importPath, name string) bool { + n, ok := typ.(*types.Named) + if !ok { + return false + } + tn := n.Obj() + return tn != nil && tn.Pkg() != nil && tn.Pkg().Path() == importPath && tn.Name() == name +} + +// scopeOf returns the tightest scope encompassing id. 
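+// For a top-level identifier, the package-level scope is replaced by the scope
+// of the file enclosing its position (an illustrative summary of the fallback
+// below, not upstream wording).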
+func (p *pkg) scopeOf(id *ast.Ident) *types.Scope { + var scope *types.Scope + if obj := p.typesInfo.ObjectOf(id); obj != nil { + scope = obj.Parent() + } + if scope == p.typesPkg.Scope() { + // We were given a top-level identifier. + // Use the file-level scope instead of the package-level scope. + pos := id.Pos() + for _, f := range p.files { + if f.f.Pos() <= pos && pos < f.f.End() { + scope = p.typesInfo.Scopes[f.f] + break + } + } + } + return scope +} + +func (p *pkg) scanSortable() { + p.sortable = make(map[string]bool) + + // bitfield for which methods exist on each type. + const ( + Len = 1 << iota + Less + Swap + ) + nmap := map[string]int{"Len": Len, "Less": Less, "Swap": Swap} + has := make(map[string]int) + for _, f := range p.files { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + // TODO(dsymonds): We could check the signature to be more precise. + recv := receiverType(fn) + if i, ok := nmap[fn.Name.Name]; ok { + has[recv] |= i + } + return false + }) + } + for typ, ms := range has { + if ms == Len|Less|Swap { + p.sortable[typ] = true + } + } +} + +func (p *pkg) isMain() bool { + for _, f := range p.files { + if f.isMain() { + return true + } + } + return false +} + +func (f *file) isMain() bool { + if f.f.Name.Name == "main" { + return true + } + return false +} + +// lintPackageComment checks package comments. It complains if +// there is no package comment, or if it is not of the right form. +// This has a notable false positive in that a package comment +// could rightfully appear in a different file of the same package, +// but that's not easy to fix since this linter is file-oriented. +func (f *file) lintPackageComment() { + if f.isTest() { + return + } + + const ref = styleGuideBase + "#package-comments" + prefix := "Package " + f.f.Name.Name + " " + + // Look for a detached package comment. + // First, scan for the last comment that occurs before the "package" keyword. + var lastCG *ast.CommentGroup + for _, cg := range f.f.Comments { + if cg.Pos() > f.f.Package { + // Gone past "package" keyword. + break + } + lastCG = cg + } + if lastCG != nil && strings.HasPrefix(lastCG.Text(), prefix) { + endPos := f.fset.Position(lastCG.End()) + pkgPos := f.fset.Position(f.f.Package) + if endPos.Line+1 < pkgPos.Line { + // There isn't a great place to anchor this error; + // the start of the blank lines between the doc and the package statement + // is at least pointing at the location of the problem. + pos := token.Position{ + Filename: endPos.Filename, + // Offset not set; it is non-trivial, and doesn't appear to be needed. + Line: endPos.Line + 1, + Column: 1, + } + f.pkg.errorfAt(pos, 0.9, link(ref), category("comments"), "package comment is detached; there should be no blank lines between it and the package statement") + return + } + } + + if f.f.Doc == nil { + f.errorf(f.f, 0.2, link(ref), category("comments"), "should have a package comment, unless it's in another file for this package") + return + } + s := f.f.Doc.Text() + if ts := strings.TrimLeft(s, " \t"); ts != s { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), "package comment should not have leading space") + s = ts + } + // Only non-main packages need to keep to this form. 
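+	// e.g. (illustrative) a package named "yaml" is expected to open with a
+	// doc comment that begins "Package yaml ".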
+ if !f.pkg.main && !strings.HasPrefix(s, prefix) { + f.errorf(f.f.Doc, 1, link(ref), category("comments"), `package comment should be of the form "%s..."`, prefix) + } +} + +// lintBlankImports complains if a non-main package has blank imports that are +// not documented. +func (f *file) lintBlankImports() { + // In package main and in tests, we don't complain about blank imports. + if f.pkg.main || f.isTest() { + return + } + + // The first element of each contiguous group of blank imports should have + // an explanatory comment of some kind. + for i, imp := range f.f.Imports { + pos := f.fset.Position(imp.Pos()) + + if !isBlank(imp.Name) { + continue // Ignore non-blank imports. + } + if i > 0 { + prev := f.f.Imports[i-1] + prevPos := f.fset.Position(prev.Pos()) + if isBlank(prev.Name) && prevPos.Line+1 == pos.Line { + continue // A subsequent blank in a group. + } + } + + // This is the first blank import of a group. + if imp.Doc == nil && imp.Comment == nil { + ref := "" + f.errorf(imp, 1, link(ref), category("imports"), "a blank import should be only in a main or test package, or have a comment justifying it") + } + } +} + +// lintImports examines import blocks. +func (f *file) lintImports() { + for i, is := range f.f.Imports { + _ = i + if is.Name != nil && is.Name.Name == "." && !f.isTest() { + f.errorf(is, 1, link(styleGuideBase+"#import-dot"), category("imports"), "should not use dot imports") + } + + } +} + +const docCommentsLink = styleGuideBase + "#doc-comments" + +// lintExported examines the exported names. +// It complains if any required doc comments are missing, +// or if they are not of the right form. The exact rules are in +// lintFuncDoc, lintTypeDoc and lintValueSpecDoc; this function +// also tracks the GenDecl structure being traversed to permit +// doc comments for constants to be on top of the const block. +// It also complains if the names stutter when combined with +// the package name. +func (f *file) lintExported() { + if f.isTest() { + return + } + + var lastGen *ast.GenDecl // last GenDecl entered. + + // Set of GenDecls that have already had missing comments flagged. + genDeclMissingComments := make(map[*ast.GenDecl]bool) + + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return false + } + // token.CONST, token.TYPE or token.VAR + lastGen = v + return true + case *ast.FuncDecl: + f.lintFuncDoc(v) + if v.Recv == nil { + // Only check for stutter on functions, not methods. + // Method names are not used package-qualified. + f.checkStutter(v.Name, "func") + } + // Don't proceed inside funcs. + return false + case *ast.TypeSpec: + // inside a GenDecl, which usually has the doc + doc := v.Doc + if doc == nil { + doc = lastGen.Doc + } + f.lintTypeDoc(v, doc) + f.checkStutter(v.Name, "type") + // Don't proceed inside types. + return false + case *ast.ValueSpec: + f.lintValueSpecDoc(v, lastGen, genDeclMissingComments) + return false + } + return true + }) +} + +var ( + allCapsRE = regexp.MustCompile(`^[A-Z0-9_]+$`) + anyCapsRE = regexp.MustCompile(`[A-Z]`) +) + +// knownNameExceptions is a set of names that are known to be exempt from naming checks. +// This is usually because they are constrained by having to match names in the +// standard library. 
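+// e.g. (illustrative) a database/sql driver must keep the method name
+// LastInsertId even though lintName would otherwise suggest LastInsertID.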
+var knownNameExceptions = map[string]bool{ + "LastInsertId": true, // must match database/sql + "kWh": true, +} + +func isInTopLevel(f *ast.File, ident *ast.Ident) bool { + path, _ := astutil.PathEnclosingInterval(f, ident.Pos(), ident.End()) + for _, f := range path { + switch f.(type) { + case *ast.File, *ast.GenDecl, *ast.ValueSpec, *ast.Ident: + continue + } + return false + } + return true +} + +// lintNames examines all names in the file. +// It complains if any use underscores or incorrect known initialisms. +func (f *file) lintNames() { + // Package names need slightly different handling than other names. + if strings.Contains(f.f.Name.Name, "_") && !strings.HasSuffix(f.f.Name.Name, "_test") { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("naming"), "don't use an underscore in package name") + } + if anyCapsRE.MatchString(f.f.Name.Name) { + f.errorf(f.f, 1, link("http://golang.org/doc/effective_go.html#package-names"), category("mixed-caps"), "don't use MixedCaps in package name; %s should be %s", f.f.Name.Name, strings.ToLower(f.f.Name.Name)) + } + + check := func(id *ast.Ident, thing string) { + if id.Name == "_" { + return + } + if knownNameExceptions[id.Name] { + return + } + + // Handle two common styles from other languages that don't belong in Go. + if len(id.Name) >= 5 && allCapsRE.MatchString(id.Name) && strings.Contains(id.Name, "_") { + capCount := 0 + for _, c := range id.Name { + if 'A' <= c && c <= 'Z' { + capCount++ + } + } + if capCount >= 2 { + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use ALL_CAPS in Go names; use CamelCase") + return + } + } + if thing == "const" || (thing == "var" && isInTopLevel(f.f, id)) { + if len(id.Name) > 2 && id.Name[0] == 'k' && id.Name[1] >= 'A' && id.Name[1] <= 'Z' { + should := string(id.Name[1]+'a'-'A') + id.Name[2:] + f.errorf(id, 0.8, link(styleGuideBase+"#mixed-caps"), category("naming"), "don't use leading k in Go names; %s %s should be %s", thing, id.Name, should) + } + } + + should := lintName(id.Name) + if id.Name == should { + return + } + + if len(id.Name) > 2 && strings.Contains(id.Name[1:], "_") { + f.errorf(id, 0.9, link("http://golang.org/doc/effective_go.html#mixed-caps"), category("naming"), "don't use underscores in Go names; %s %s should be %s", thing, id.Name, should) + return + } + f.errorf(id, 0.8, link(styleGuideBase+"#initialisms"), category("naming"), "%s %s should be %s", thing, id.Name, should) + } + checkList := func(fl *ast.FieldList, thing string) { + if fl == nil { + return + } + for _, f := range fl.List { + for _, id := range f.Names { + check(id, thing) + } + } + } + f.walk(func(node ast.Node) bool { + switch v := node.(type) { + case *ast.AssignStmt: + if v.Tok == token.ASSIGN { + return true + } + for _, exp := range v.Lhs { + if id, ok := exp.(*ast.Ident); ok { + check(id, "var") + } + } + case *ast.FuncDecl: + if f.isTest() && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) { + return true + } + + thing := "func" + if v.Recv != nil { + thing = "method" + } + + // Exclude naming warnings for functions that are exported to C but + // not exported in the Go API. + // See https://github.com/golang/lint/issues/144. 
+ if ast.IsExported(v.Name.Name) || !isCgoExported(v) { + check(v.Name, thing) + } + + checkList(v.Type.Params, thing+" parameter") + checkList(v.Type.Results, thing+" result") + case *ast.GenDecl: + if v.Tok == token.IMPORT { + return true + } + var thing string + switch v.Tok { + case token.CONST: + thing = "const" + case token.TYPE: + thing = "type" + case token.VAR: + thing = "var" + } + for _, spec := range v.Specs { + switch s := spec.(type) { + case *ast.TypeSpec: + check(s.Name, thing) + case *ast.ValueSpec: + for _, id := range s.Names { + check(id, thing) + } + } + } + case *ast.InterfaceType: + // Do not check interface method names. + // They are often constrained by the method names of concrete types. + for _, x := range v.Methods.List { + ft, ok := x.Type.(*ast.FuncType) + if !ok { // might be an embedded interface name + continue + } + checkList(ft.Params, "interface method parameter") + checkList(ft.Results, "interface method result") + } + case *ast.RangeStmt: + if v.Tok == token.ASSIGN { + return true + } + if id, ok := v.Key.(*ast.Ident); ok { + check(id, "range var") + } + if id, ok := v.Value.(*ast.Ident); ok { + check(id, "range var") + } + case *ast.StructType: + for _, f := range v.Fields.List { + for _, id := range f.Names { + check(id, "struct field") + } + } + } + return true + }) +} + +// lintName returns a different name if it should be different. +func lintName(name string) (should string) { + // Fast path for simple cases: "_" and all lowercase. + if name == "_" { + return name + } + allLower := true + for _, r := range name { + if !unicode.IsLower(r) { + allLower = false + break + } + } + if allLower { + return name + } + + // Split camelCase at any lower->upper transition, and split on underscores. + // Check each word for common initialisms. + runes := []rune(name) + w, i := 0, 0 // index of start of word, scan + for i+1 <= len(runes) { + eow := false // whether we hit the end of a word + if i+1 == len(runes) { + eow = true + } else if runes[i+1] == '_' { + // underscore; shift the remainder forward over any run of underscores + eow = true + n := 1 + for i+n+1 < len(runes) && runes[i+n+1] == '_' { + n++ + } + + // Leave at most one underscore if the underscore is between two digits + if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) { + n-- + } + + copy(runes[i+1:], runes[i+n+1:]) + runes = runes[:len(runes)-n] + } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) { + // lower->non-lower + eow = true + } + i++ + if !eow { + continue + } + + // [w,i) is a word. + word := string(runes[w:i]) + if u := strings.ToUpper(word); commonInitialisms[u] { + // Keep consistent case, which is lowercase only at the start. + if w == 0 && unicode.IsLower(runes[w]) { + u = strings.ToLower(u) + } + // All the common initialisms are ASCII, + // so we can replace the bytes exactly. + copy(runes[w:], []rune(u)) + } else if w > 0 && strings.ToLower(word) == word { + // already all lowercase, and not the first word, so uppercase the first character. + runes[w] = unicode.ToUpper(runes[w]) + } + w = i + } + return string(runes) +} + +// commonInitialisms is a set of common initialisms. +// Only add entries that are highly unlikely to be non-initialisms. +// For instance, "ID" is fine (Freudian code is rare), but "AND" is not.
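+// These entries feed lintName above; e.g. (illustrative) lintName("HttpServerId")
+// returns "HTTPServerID".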
+var commonInitialisms = map[string]bool{ + "ACL": true, + "API": true, + "ASCII": true, + "CPU": true, + "CSS": true, + "DNS": true, + "EOF": true, + "GUID": true, + "HTML": true, + "HTTP": true, + "HTTPS": true, + "ID": true, + "IP": true, + "JSON": true, + "LHS": true, + "QPS": true, + "RAM": true, + "RHS": true, + "RPC": true, + "SLA": true, + "SMTP": true, + "SQL": true, + "SSH": true, + "TCP": true, + "TLS": true, + "TTL": true, + "UDP": true, + "UI": true, + "UID": true, + "UUID": true, + "URI": true, + "URL": true, + "UTF8": true, + "VM": true, + "XML": true, + "XMPP": true, + "XSRF": true, + "XSS": true, +} + +// lintTypeDoc examines the doc comment on a type. +// It complains if they are missing from an exported type, +// or if they are not of the standard form. +func (f *file) lintTypeDoc(t *ast.TypeSpec, doc *ast.CommentGroup) { + if !ast.IsExported(t.Name.Name) { + return + } + if doc == nil { + f.errorf(t, 1, link(docCommentsLink), category("comments"), "exported type %v should have comment or be unexported", t.Name) + return + } + + s := doc.Text() + articles := [...]string{"A", "An", "The"} + for _, a := range articles { + if strings.HasPrefix(s, a+" ") { + s = s[len(a)+1:] + break + } + } + if !strings.HasPrefix(s, t.Name.Name+" ") { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported type %v should be of the form "%v ..." (with optional leading article)`, t.Name, t.Name) + } +} + +var commonMethods = map[string]bool{ + "Error": true, + "Read": true, + "ServeHTTP": true, + "String": true, + "Write": true, +} + +// lintFuncDoc examines doc comments on functions and methods. +// It complains if they are missing, or not of the right form. +// It has specific exclusions for well-known methods (see commonMethods above). +func (f *file) lintFuncDoc(fn *ast.FuncDecl) { + if !ast.IsExported(fn.Name.Name) { + // func is unexported + return + } + kind := "function" + name := fn.Name.Name + if fn.Recv != nil && len(fn.Recv.List) > 0 { + // method + kind = "method" + recv := receiverType(fn) + if !ast.IsExported(recv) { + // receiver is unexported + return + } + if commonMethods[name] { + return + } + switch name { + case "Len", "Less", "Swap": + if f.pkg.sortable[recv] { + return + } + } + name = recv + "." + name + } + if fn.Doc == nil { + f.errorf(fn, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment or be unexported", kind, name) + return + } + s := fn.Doc.Text() + prefix := fn.Name.Name + " " + if !strings.HasPrefix(s, prefix) { + f.errorf(fn.Doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +// lintValueSpecDoc examines package-global variables and constants. +// It complains if they are not individually declared, +// or if they are not suitably documented in the right form (unless they are in a block that is commented). +func (f *file) lintValueSpecDoc(vs *ast.ValueSpec, gd *ast.GenDecl, genDeclMissingComments map[*ast.GenDecl]bool) { + kind := "var" + if gd.Tok == token.CONST { + kind = "const" + } + + if len(vs.Names) > 1 { + // Check that none are exported except for the first. + for _, n := range vs.Names[1:] { + if ast.IsExported(n.Name) { + f.errorf(vs, 1, category("comments"), "exported %s %s should have its own declaration", kind, n.Name) + return + } + } + } + + // Only one name. 
+ name := vs.Names[0].Name + if !ast.IsExported(name) { + return + } + + if vs.Doc == nil && gd.Doc == nil { + if genDeclMissingComments[gd] { + return + } + block := "" + if kind == "const" && gd.Lparen.IsValid() { + block = " (or a comment on this block)" + } + f.errorf(vs, 1, link(docCommentsLink), category("comments"), "exported %s %s should have comment%s or be unexported", kind, name, block) + genDeclMissingComments[gd] = true + return + } + // If this GenDecl has parens and a comment, we don't check its comment form. + if gd.Lparen.IsValid() && gd.Doc != nil { + return + } + // The relevant text to check will be on either vs.Doc or gd.Doc. + // Use vs.Doc preferentially. + doc := vs.Doc + if doc == nil { + doc = gd.Doc + } + prefix := name + " " + if !strings.HasPrefix(doc.Text(), prefix) { + f.errorf(doc, 1, link(docCommentsLink), category("comments"), `comment on exported %s %s should be of the form "%s..."`, kind, name, prefix) + } +} + +func (f *file) checkStutter(id *ast.Ident, thing string) { + pkg, name := f.f.Name.Name, id.Name + if !ast.IsExported(name) { + // unexported name + return + } + // A name stutters if the package name is a strict prefix + // and the next character of the name starts a new word. + if len(name) <= len(pkg) { + // name is too short to stutter. + // This permits the name to be the same as the package name. + return + } + if !strings.EqualFold(pkg, name[:len(pkg)]) { + return + } + // We can assume the name is well-formed UTF-8. + // If the next rune after the package name is uppercase or an underscore + // then it's starting a new word and thus this name stutters. + rem := name[len(pkg):] + if next, _ := utf8.DecodeRuneInString(rem); next == '_' || unicode.IsUpper(next) { + f.errorf(id, 0.8, link(styleGuideBase+"#package-names"), category("naming"), "%s name will be used as %s.%s by other packages, and that stutters; consider calling this %s", thing, pkg, name, rem) + } +} + +// zeroLiteral is a set of ast.BasicLit values that are zero values. +// It is not exhaustive. +var zeroLiteral = map[string]bool{ + "false": true, // bool + // runes + `'\x00'`: true, + `'\000'`: true, + // strings + `""`: true, + "``": true, + // numerics + "0": true, + "0.": true, + "0.0": true, + "0i": true, +} + +// lintElses examines else blocks. It complains about any else block whose if block ends in a return. +func (f *file) lintElses() { + // We don't want to flag if { } else if { } else { } constructions. + // They will appear as an IfStmt whose Else field is also an IfStmt. + // Record such a node so we ignore it when we visit it. + ignore := make(map[*ast.IfStmt]bool) + + f.walk(func(node ast.Node) bool { + ifStmt, ok := node.(*ast.IfStmt) + if !ok || ifStmt.Else == nil { + return true + } + if elseif, ok := ifStmt.Else.(*ast.IfStmt); ok { + ignore[elseif] = true + return true + } + if ignore[ifStmt] { + return true + } + if _, ok := ifStmt.Else.(*ast.BlockStmt); !ok { + // only care about elses without conditions + return true + } + if len(ifStmt.Body.List) == 0 { + return true + } + shortDecl := false // does the if statement have a ":=" initialization statement?
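+		// Illustrative note (not from upstream): when the if declares variables
+		// with ":=", simply outdenting the else block would leave those variables
+		// out of scope in the outdented code, so the hint appended below tells the
+		// user to relocate the declaration.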
+ if ifStmt.Init != nil { + if as, ok := ifStmt.Init.(*ast.AssignStmt); ok && as.Tok == token.DEFINE { + shortDecl = true + } + } + lastStmt := ifStmt.Body.List[len(ifStmt.Body.List)-1] + if _, ok := lastStmt.(*ast.ReturnStmt); ok { + extra := "" + if shortDecl { + extra = " (move short variable declaration to its own line if necessary)" + } + f.errorf(ifStmt.Else, 1, link(styleGuideBase+"#indent-error-flow"), category("indent"), "if block ends with a return statement, so drop this else and outdent its block"+extra) + } + return true + }) +} + +// lintRanges examines range clauses. It complains about redundant constructions. +func (f *file) lintRanges() { + f.walk(func(node ast.Node) bool { + rs, ok := node.(*ast.RangeStmt) + if !ok { + return true + } + + if isIdent(rs.Key, "_") && (rs.Value == nil || isIdent(rs.Value, "_")) { + p := f.errorf(rs.Key, 1, category("range-loop"), "should omit values from range; this loop is equivalent to `for range ...`") + + newRS := *rs // shallow copy + newRS.Value = nil + newRS.Key = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + + return true + } + + if isIdent(rs.Value, "_") { + p := f.errorf(rs.Value, 1, category("range-loop"), "should omit 2nd value from range; this loop is equivalent to `for %s %s range ...`", f.render(rs.Key), rs.Tok) + + newRS := *rs // shallow copy + newRS.Value = nil + p.ReplacementLine = f.firstLineOf(&newRS, rs) + } + + return true + }) +} + +// lintErrorf examines errors.New and testing.Error calls. It complains if its only argument is an fmt.Sprintf invocation. +func (f *file) lintErrorf() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok || len(ce.Args) != 1 { + return true + } + isErrorsNew := isPkgDot(ce.Fun, "errors", "New") + var isTestingError bool + se, ok := ce.Fun.(*ast.SelectorExpr) + if ok && se.Sel.Name == "Error" { + if typ := f.pkg.typeOf(se.X); typ != nil { + isTestingError = typ.String() == "*testing.T" + } + } + if !isErrorsNew && !isTestingError { + return true + } + if !f.imports("errors") { + return true + } + arg := ce.Args[0] + ce, ok = arg.(*ast.CallExpr) + if !ok || !isPkgDot(ce.Fun, "fmt", "Sprintf") { + return true + } + errorfPrefix := "fmt" + if isTestingError { + errorfPrefix = f.render(se.X) + } + p := f.errorf(node, 1, category("errors"), "should replace %s(fmt.Sprintf(...)) with %s.Errorf(...)", f.render(se), errorfPrefix) + + m := f.srcLineWithMatch(ce, `^(.*)`+f.render(se)+`\(fmt\.Sprintf\((.*)\)\)(.*)$`) + if m != nil { + p.ReplacementLine = m[1] + errorfPrefix + ".Errorf(" + m[2] + ")" + m[3] + } + + return true + }) +} + +// lintErrors examines global error vars. It complains if they aren't named in the standard way. 
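+// e.g. (illustrative) var parseFailure = errors.New("parse failed") is flagged
+// with a suggestion to use the form errFoo, or ErrFoo when exported.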
+func (f *file) lintErrors() { + for _, decl := range f.f.Decls { + gd, ok := decl.(*ast.GenDecl) + if !ok || gd.Tok != token.VAR { + continue + } + for _, spec := range gd.Specs { + spec := spec.(*ast.ValueSpec) + if len(spec.Names) != 1 || len(spec.Values) != 1 { + continue + } + ce, ok := spec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + continue + } + + id := spec.Names[0] + prefix := "err" + if id.IsExported() { + prefix = "Err" + } + if !strings.HasPrefix(id.Name, prefix) { + f.errorf(id, 0.9, category("naming"), "error var %s should have name of the form %sFoo", id.Name, prefix) + } + } + } +} + +func lintErrorString(s string) (isClean bool, conf float64) { + const basicConfidence = 0.8 + const capConfidence = basicConfidence - 0.2 + first, firstN := utf8.DecodeRuneInString(s) + last, _ := utf8.DecodeLastRuneInString(s) + if last == '.' || last == ':' || last == '!' || last == '\n' { + return false, basicConfidence + } + if unicode.IsUpper(first) { + // People use proper nouns and exported Go identifiers in error strings, + // so decrease the confidence of warnings for capitalization. + if len(s) <= firstN { + return false, capConfidence + } + // Flag strings starting with something that doesn't look like an initialism. + if second, _ := utf8.DecodeRuneInString(s[firstN:]); !unicode.IsUpper(second) { + return false, capConfidence + } + } + return true, 0 +} + +// lintErrorStrings examines error strings. +// It complains if they are capitalized or end in punctuation or a newline. +func (f *file) lintErrorStrings() { + f.walk(func(node ast.Node) bool { + ce, ok := node.(*ast.CallExpr) + if !ok { + return true + } + if !isPkgDot(ce.Fun, "errors", "New") && !isPkgDot(ce.Fun, "fmt", "Errorf") { + return true + } + if len(ce.Args) < 1 { + return true + } + str, ok := ce.Args[0].(*ast.BasicLit) + if !ok || str.Kind != token.STRING { + return true + } + s, _ := strconv.Unquote(str.Value) // can assume well-formed Go + if s == "" { + return true + } + clean, conf := lintErrorString(s) + if clean { + return true + } + + f.errorf(str, conf, link(styleGuideBase+"#error-strings"), category("errors"), + "error strings should not be capitalized or end with punctuation or a newline") + return true + }) +} + +// lintReceiverNames examines receiver names. It complains about inconsistent +// names used for the same type and names such as "this". 
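+// For example, the hypothetical methods +// +// func (c *Client) Close() error { ... } +// func (cl *Client) Ping() error { ... } +// +// are flagged because cl is inconsistent with the earlier receiver name c.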
+func (f *file) lintReceiverNames() { + typeReceiver := map[string]string{} + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Recv == nil || len(fn.Recv.List) == 0 { + return true + } + names := fn.Recv.List[0].Names + if len(names) < 1 { + return true + } + name := names[0].Name + const ref = styleGuideBase + "#receiver-names" + if name == "_" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should not be an underscore, omit the name if it is unused`) + return true + } + if name == "this" || name == "self" { + f.errorf(n, 1, link(ref), category("naming"), `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`) + return true + } + recv := receiverType(fn) + if prev, ok := typeReceiver[recv]; ok && prev != name { + f.errorf(n, 1, link(ref), category("naming"), "receiver name %s should be consistent with previous receiver name %s for %s", name, prev, recv) + return true + } + typeReceiver[recv] = name + return true + }) +} + +// lintIncDec examines statements that increment or decrement a variable. +// It complains if they don't use x++ or x--. +func (f *file) lintIncDec() { + f.walk(func(n ast.Node) bool { + as, ok := n.(*ast.AssignStmt) + if !ok { + return true + } + if len(as.Lhs) != 1 { + return true + } + if !isOne(as.Rhs[0]) { + return true + } + var suffix string + switch as.Tok { + case token.ADD_ASSIGN: + suffix = "++" + case token.SUB_ASSIGN: + suffix = "--" + default: + return true + } + f.errorf(as, 0.8, category("unary-op"), "should replace %s with %s%s", f.render(as), f.render(as.Lhs[0]), suffix) + return true + }) +} + +// lintErrorReturn examines function declarations that return an error. +// It complains if the error isn't the last parameter. +func (f *file) lintErrorReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || fn.Type.Results == nil { + return true + } + ret := fn.Type.Results.List + if len(ret) <= 1 { + return true + } + if isIdent(ret[len(ret)-1].Type, "error") { + return true + } + // An error return parameter should be the last parameter. + // Flag any error parameters found before the last. + for _, r := range ret[:len(ret)-1] { + if isIdent(r.Type, "error") { + f.errorf(fn, 0.9, category("arg-order"), "error should be the last type when returning multiple items") + break // only flag one + } + } + return true + }) +} + +// lintUnexportedReturn examines exported function declarations. +// It complains if any return an unexported type. +func (f *file) lintUnexportedReturn() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok { + return true + } + if fn.Type.Results == nil { + return false + } + if !fn.Name.IsExported() { + return false + } + thing := "func" + if fn.Recv != nil && len(fn.Recv.List) > 0 { + thing = "method" + if !ast.IsExported(receiverType(fn)) { + // Don't report exported methods of unexported types, + // such as private implementations of sort.Interface. + return false + } + } + for _, ret := range fn.Type.Results.List { + typ := f.pkg.typeOf(ret.Type) + if exportedType(typ) { + continue + } + f.errorf(ret.Type, 0.8, category("unexported-type-in-api"), + "exported %s %s returns unexported type %s, which can be annoying to use", + thing, fn.Name.Name, typ) + break // only flag one + } + return false + }) +} + +// exportedType reports whether typ is an exported type. +// It is imprecise, and will err on the side of returning true, +// such as for composite types. 
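+// For example, a hypothetical map[string]*unexportedT is reported as unexported +// because its element type is unexported, while struct and interface types are +// conservatively reported as exported.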
+func exportedType(typ types.Type) bool { + switch T := typ.(type) { + case *types.Named: + // Builtin types have no package. + return T.Obj().Pkg() == nil || T.Obj().Exported() + case *types.Map: + return exportedType(T.Key()) && exportedType(T.Elem()) + case interface { + Elem() types.Type + }: // array, slice, pointer, chan + return exportedType(T.Elem()) + } + // Be conservative about other types, such as struct, interface, etc. + return true +} + +// timeSuffixes is a list of name suffixes that imply a time unit. +// This is not an exhaustive list. +var timeSuffixes = []string{ + "Sec", "Secs", "Seconds", + "Msec", "Msecs", + "Milli", "Millis", "Milliseconds", + "Usec", "Usecs", "Microseconds", + "MS", "Ms", +} + +func (f *file) lintTimeNames() { + f.walk(func(node ast.Node) bool { + v, ok := node.(*ast.ValueSpec) + if !ok { + return true + } + for _, name := range v.Names { + origTyp := f.pkg.typeOf(name) + // Look for time.Duration or *time.Duration; + // the latter is common when using flag.Duration. + typ := origTyp + if pt, ok := typ.(*types.Pointer); ok { + typ = pt.Elem() + } + if !f.pkg.isNamedType(typ, "time", "Duration") { + continue + } + suffix := "" + for _, suf := range timeSuffixes { + if strings.HasSuffix(name.Name, suf) { + suffix = suf + break + } + } + if suffix == "" { + continue + } + f.errorf(v, 0.9, category("time"), "var %s is of type %v; don't use unit-specific suffix %q", name.Name, origTyp, suffix) + } + return true + }) +} + +// lintContextKeyTypes checks for call expressions to context.WithValue with +// basic types used for the key argument. +// See: https://golang.org/issue/17293 +func (f *file) lintContextKeyTypes() { + f.walk(func(node ast.Node) bool { + switch node := node.(type) { + case *ast.CallExpr: + f.checkContextKeyType(node) + } + + return true + }) +} + +// checkContextKeyType reports an error if the call expression calls +// context.WithValue with a key argument of basic type. +func (f *file) checkContextKeyType(x *ast.CallExpr) { + sel, ok := x.Fun.(*ast.SelectorExpr) + if !ok { + return + } + pkg, ok := sel.X.(*ast.Ident) + if !ok || pkg.Name != "context" { + return + } + if sel.Sel.Name != "WithValue" { + return + } + + // key is the second argument to context.WithValue + if len(x.Args) != 3 { + return + } + key := f.pkg.typesInfo.Types[x.Args[1]] + + if ktyp, ok := key.Type.(*types.Basic); ok && ktyp.Kind() != types.Invalid { + f.errorf(x, 1.0, category("context"), fmt.Sprintf("should not use basic type %s as key in context.WithValue", key.Type)) + } +} + +// lintContextArgs examines function declarations that contain an +// argument with a type of context.Context. +// It complains if that argument isn't the first parameter. +func (f *file) lintContextArgs() { + f.walk(func(n ast.Node) bool { + fn, ok := n.(*ast.FuncDecl) + if !ok || len(fn.Type.Params.List) <= 1 { + return true + } + // A context.Context should be the first parameter of a function. + // Flag any that show up after the first. + for _, arg := range fn.Type.Params.List[1:] { + if isPkgDot(arg.Type, "context", "Context") { + f.errorf(fn, 0.9, link("https://golang.org/pkg/context/"), category("arg-order"), "context.Context should be the first parameter of a function") + break // only flag one + } + } + return true + }) +} + +// containsComments returns whether the interval [start, end) contains any +// comments without "// MATCH " prefix.
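+// (Comments with the "// MATCH " prefix are expected-diagnostic markers in +// this package's test data, so they are not counted here.)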
+func (f *file) containsComments(start, end token.Pos) bool { + for _, cgroup := range f.f.Comments { + comments := cgroup.List + if comments[0].Slash >= end { + // All comments starting with this group are after end pos. + return false + } + if comments[len(comments)-1].Slash < start { + // Comments group ends before start pos. + continue + } + for _, c := range comments { + if start <= c.Slash && c.Slash < end && !strings.HasPrefix(c.Text, "// MATCH ") { + return true + } + } + } + return false +} + +// receiverType returns the named type of the method receiver, sans "*", +// or "invalid-type" if fn.Recv is ill-formed. +func receiverType(fn *ast.FuncDecl) string { + switch e := fn.Recv.List[0].Type.(type) { + case *ast.Ident: + return e.Name + case *ast.StarExpr: + if id, ok := e.X.(*ast.Ident); ok { + return id.Name + } + } + // The parser accepts much more than just the legal forms. + return "invalid-type" +} + +func (f *file) walk(fn func(ast.Node) bool) { + ast.Walk(walker(fn), f.f) +} + +func (f *file) render(x interface{}) string { + var buf bytes.Buffer + if err := printer.Fprint(&buf, f.fset, x); err != nil { + panic(err) + } + return buf.String() +} + +func (f *file) debugRender(x interface{}) string { + var buf bytes.Buffer + if err := ast.Fprint(&buf, f.fset, x, nil); err != nil { + panic(err) + } + return buf.String() +} + +// walker adapts a function to satisfy the ast.Visitor interface. +// The function returns whether the walk should proceed into the node's children. +type walker func(ast.Node) bool + +func (w walker) Visit(node ast.Node) ast.Visitor { + if w(node) { + return w + } + return nil +} + +func isIdent(expr ast.Expr, ident string) bool { + id, ok := expr.(*ast.Ident) + return ok && id.Name == ident +} + +// isBlank returns whether id is the blank identifier "_". +// If id == nil, the answer is false. +func isBlank(id *ast.Ident) bool { return id != nil && id.Name == "_" } + +func isPkgDot(expr ast.Expr, pkg, name string) bool { + sel, ok := expr.(*ast.SelectorExpr) + return ok && isIdent(sel.X, pkg) && isIdent(sel.Sel, name) +} + +func isOne(expr ast.Expr) bool { + lit, ok := expr.(*ast.BasicLit) + return ok && lit.Kind == token.INT && lit.Value == "1" +} + +func isCgoExported(f *ast.FuncDecl) bool { + if f.Recv != nil || f.Doc == nil { + return false + } + + cgoExport := regexp.MustCompile(fmt.Sprintf("(?m)^//export %s$", regexp.QuoteMeta(f.Name.Name))) + for _, c := range f.Doc.List { + if cgoExport.MatchString(c.Text) { + return true + } + } + return false +} + +var basicTypeKinds = map[types.BasicKind]string{ + types.UntypedBool: "bool", + types.UntypedInt: "int", + types.UntypedRune: "rune", + types.UntypedFloat: "float64", + types.UntypedComplex: "complex128", + types.UntypedString: "string", +} + +// isUntypedConst reports whether expr is an untyped constant, +// and indicates what its default type is. +func (f *file) isUntypedConst(expr ast.Expr) (defType string, ok bool) { + // Re-evaluate expr outside of its context to see if it's untyped. + // (An expr evaluated within, for example, an assignment context will get the type of the LHS.) + exprStr := f.render(expr) + tv, err := types.Eval(f.fset, f.pkg.typesPkg, expr.Pos(), exprStr) + if err != nil { + return "", false + } + if b, ok := tv.Type.(*types.Basic); ok { + if dt, ok := basicTypeKinds[b.Kind()]; ok { + return dt, true + } + } + + return "", false +} + +// firstLineOf renders the given node and returns its first line. +// It will also match the indentation of another node.
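+// For example, lintRanges uses it to render a rewritten `for range ...` +// statement indented to match the statement it replaces.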
+func (f *file) firstLineOf(node, match ast.Node) string { + line := f.render(node) + if i := strings.Index(line, "\n"); i >= 0 { + line = line[:i] + } + return f.indentOf(match) + line +} + +func (f *file) indentOf(node ast.Node) string { + line := srcLine(f.src, f.fset.Position(node.Pos())) + for i, r := range line { + switch r { + case ' ', '\t': + default: + return line[:i] + } + } + return line // unusual or empty line +} + +func (f *file) srcLineWithMatch(node ast.Node, pattern string) (m []string) { + line := srcLine(f.src, f.fset.Position(node.Pos())) + line = strings.TrimSuffix(line, "\n") + rx := regexp.MustCompile(pattern) + return rx.FindStringSubmatch(line) +} + +// imports returns true if the current file imports the specified package path. +func (f *file) imports(importPath string) bool { + all := astutil.Imports(f.fset, f.f) + for _, p := range all { + for _, i := range p { + uq, err := strconv.Unquote(i.Path.Value) + if err == nil && importPath == uq { + return true + } + } + } + return false +} + +// srcLine returns the complete line at p, including the terminating newline. +func srcLine(src []byte, p token.Position) string { + // Run to end of line in both directions if not at line start/end. + lo, hi := p.Offset, p.Offset+1 + for lo > 0 && src[lo-1] != '\n' { + lo-- + } + for hi < len(src) && src[hi-1] != '\n' { + hi++ + } + return string(src[lo:hi]) +} diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 14e4d5caa..6e5c81acd 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -7,6 +7,7 @@ package unix import ( + "math/bits" "unsafe" ) @@ -79,50 +80,7 @@ func (s *CPUSet) IsSet(cpu int) bool { func (s *CPUSet) Count() int { c := 0 for _, b := range s { - c += onesCount64(uint64(b)) + c += bits.OnesCount64(uint64(b)) } return c } - -// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64. -// Once this package can require Go 1.9, we can delete this -// and update the caller to use bits.OnesCount64. -func onesCount64(x uint64) int { - const m0 = 0x5555555555555555 // 01010101 ... - const m1 = 0x3333333333333333 // 00110011 ... - const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... - - // Unused in this function, but definitions preserved for - // documentation purposes: - // - // const m3 = 0x00ff00ff00ff00ff // etc. - // const m4 = 0x0000ffff0000ffff - // - // Implementation: Parallel summing of adjacent bits. - // See "Hacker's Delight", Chap. 5: Counting Bits. - // The following pattern shows the general approach: - // - // x = x>>1&(m0&m) + x&(m0&m) - // x = x>>2&(m1&m) + x&(m1&m) - // x = x>>4&(m2&m) + x&(m2&m) - // x = x>>8&(m3&m) + x&(m3&m) - // x = x>>16&(m4&m) + x&(m4&m) - // x = x>>32&(m5&m) + x&(m5&m) - // return int(x) - // - // Masking (& operations) can be left away when there's no - // danger that a field's sum will carry over into the next - // field: Since the result cannot be > 64, 8 bits is enough - // and we can ignore the masks for the shifts by 8 and up. - // Per "Hacker's Delight", the first line can be simplified - // more, but it saves at best one instruction, so we leave - // it alone for clarity. 
- const m = 1<<64 - 1 - x = x>>1&(m0&m) + x&(m0&m) - x = x>>2&(m1&m) + x&(m1&m) - x = (x>>4 + x) & (m2 & m) - x += x >> 8 - x += x >> 16 - x += x >> 32 - return int(x) & (1<<7 - 1) -} diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index f121a8d64..3559e5dcb 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -6,7 +6,19 @@ package unix -import "runtime" +import ( + "runtime" + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // @@ -14,7 +26,7 @@ import "runtime" func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctlSetWinsize(fd, req, value) + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } @@ -24,7 +36,30 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. - err := ioctlSetTermios(fd, req, value) + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 14624b953..8a2dcfa51 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -183,20 +183,26 @@ struct ltchars { #include #include #include +#include #include +#include #include +#include +#include +#include +#include +#include +#include +#include #include +#include #include #include #include #include #include #include -#include -#include -#include -#include -#include +#include #include #include #include @@ -206,26 +212,23 @@ struct ltchars { #include #include #include +#include #include +#include #include #include +#include #include -#include #include #include -#include -#include -#include #include -#include -#include +#include #include -#include +#include +#include +#include #include -#include -#include -#include -#include + #include #include @@ -264,6 +267,11 @@ struct ltchars { #define FS_KEY_DESC_PREFIX "fscrypt:" #define FS_KEY_DESC_PREFIX_SIZE 8 #define FS_MAX_KEY_SIZE 64 + +// The code generator produces -0x1 for (~0), but an unsigned value is necessary +// for the tipc_subscr timeout __u32 field. 
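+// 0xffffffff is the two's-complement bit pattern of (~0) in a 32-bit field.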
+#undef TIPC_WAIT_FOREVER +#define TIPC_WAIT_FOREVER 0xffffffff ' includes_NetBSD=' @@ -451,6 +459,7 @@ ccflags="$@" $2 ~ /^SYSCTL_VERS/ || $2 !~ "MNT_BITS" && $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT)_/ || $2 ~ /^KEXEC_/ || @@ -506,6 +515,7 @@ ccflags="$@" $2 ~ /^XDP_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || $2 ~ /^CRYPTO_/ || + $2 ~ /^TIPC_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 1aa065f9c..9ad8a0d4a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -350,49 +350,12 @@ func (w WaitStatus) Signal() Signal { func (w WaitStatus) Continued() bool { return w&0x01000000 != 0 } -func (w WaitStatus) CoreDump() bool { return w&0x200 != 0 } +func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, // Therefore, the programmer must call dup2 instead of fcntl in this case. 
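The ioctl consolidation in this change removes the per-platform copies of IoctlSetInt, IoctlGetInt, IoctlGetWinsize, and IoctlGetTermios in favor of the single definitions in ioctl.go above. A minimal sketch of a caller using the consolidated helpers (assuming a Unix terminal on file descriptor 0; illustrative only, not part of this diff):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// IoctlGetWinsize is now defined once in ioctl.go for every Unix platform.
	ws, err := unix.IoctlGetWinsize(0, unix.TIOCGWINSZ)
	if err != nil {
		fmt.Println("ioctl:", err)
		return
	}
	fmt.Printf("terminal is %d rows x %d cols\n", ws.Row, ws.Col)
}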
diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index bf05603f1..b3c8e3301 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -29,6 +29,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 13d4321f4..9a6e02417 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -29,6 +29,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 97a8eef6f..3e6671426 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -413,8 +413,6 @@ func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err e return kevent(kq, change, len(changes), event, len(events), timeout) } -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL - // sysctlmib translates name to mib number and appends any additional args. func sysctlmib(name string, args ...int) ([]_C_int, error) { // Translate name to mib number. diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 3e1cdfb50..f26a19ebd 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -339,43 +339,6 @@ func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(sig //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index cd8be182a..cf1bec6a3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -10,6 +10,7 @@ import ( "syscall" ) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func setTimespec(sec, nsec int64) Timespec { @@ -45,6 +46,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index d0d07243c..5867ed00b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -10,6 +10,7 @@ import ( "syscall" ) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func setTimespec(sec, nsec int64) Timespec { @@ -45,6 +46,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index 01e8a38a9..e199e12a5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -12,6 +12,10 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ENOTSUP } +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } @@ -45,6 +49,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index e674f81da..2c50ca901 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -14,6 +14,10 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ENOTSUP } +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec 
{ return Timespec{Sec: sec, Nsec: nsec} } @@ -47,6 +51,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 260a400f9..99d875624 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -14,6 +14,8 @@ package unix import "unsafe" +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -150,43 +152,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error { err := sysctl(mib, old, oldlen, nil, 0) if err != nil { diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 9babb31ea..a6b4830ac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 329d240b9..b62231bca 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -36,6 +36,8 @@ var ( // INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h const _ino64First = 1200031 +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + func supportsABI(ver uint32) bool { osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) return osreldate >= ver @@ -201,43 +203,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { 
//sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 21e03958c..dcc56457a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 9c945a657..321c3bace 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 5cd6243f2..697700831 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index a31805487..dbbbfd603 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go 
b/vendor/golang.org/x/sys/unix/syscall_linux.go index 637b5017b..b2c2d9b2e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -71,6 +71,17 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { // ioctl itself should not be exposed directly, but additional get/set // functions for specific types are permissible. +// IoctlRetInt performs an ioctl operation specified by req on a device +// associated with opened file descriptor fd, and returns a non-negative +// integer that is returned by the ioctl syscall. +func IoctlRetInt(fd int, req uint) (int, error) { + ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0) + if err != 0 { + return 0, err + } + return int(ret), nil +} + // IoctlSetPointerInt performs an ioctl operation which sets an // integer value on fd, using the specified request number. The ioctl // argument is called with a pointer to the integer value, rather than @@ -80,52 +91,18 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) } -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - func IoctlSetRTCTime(fd int, value *RTCTime) error { err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - func IoctlGetUint32(fd int, req uint) (uint32, error) { var value uint32 err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetRTCTime(fd int) (*RTCTime, error) { var value RTCTime err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) @@ -798,6 +775,70 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } +// SockaddrTIPC implements the Sockaddr interface for AF_TIPC type sockets. +// For more information on TIPC, see: http://tipc.sourceforge.net/. +type SockaddrTIPC struct { + // Scope is the publication scopes when binding service/service range. + // Should be set to TIPC_CLUSTER_SCOPE or TIPC_NODE_SCOPE. + Scope int + + // Addr is the type of address used to manipulate a socket. Addr must be + // one of: + // - *TIPCSocketAddr: "id" variant in the C addr union + // - *TIPCServiceRange: "nameseq" variant in the C addr union + // - *TIPCServiceName: "name" variant in the C addr union + // + // If nil, EINVAL will be returned when the structure is used. 
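+ // + // For example, a hypothetical service address could be constructed as + // &SockaddrTIPC{Scope: TIPC_CLUSTER_SCOPE, Addr: &TIPCServiceName{Type: 1000, Instance: 1}}.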
+ Addr TIPCAddr + + raw RawSockaddrTIPC +} + +// TIPCAddr is implemented by types that can be used as an address for +// SockaddrTIPC. It is only implemented by *TIPCSocketAddr, *TIPCServiceRange, +// and *TIPCServiceName. +type TIPCAddr interface { + tipcAddrtype() uint8 + tipcAddr() [12]byte +} + +func (sa *TIPCSocketAddr) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCSocketAddr{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCSocketAddr) tipcAddrtype() uint8 { return TIPC_SOCKET_ADDR } + +func (sa *TIPCServiceRange) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCServiceRange{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCServiceRange) tipcAddrtype() uint8 { return TIPC_SERVICE_RANGE } + +func (sa *TIPCServiceName) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCServiceName{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCServiceName) tipcAddrtype() uint8 { return TIPC_SERVICE_ADDR } + +func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Addr == nil { + return nil, 0, EINVAL + } + + sa.raw.Family = AF_TIPC + sa.raw.Scope = int8(sa.Scope) + sa.raw.Addrtype = sa.Addr.tipcAddrtype() + sa.raw.Addr = sa.Addr.tipcAddr() + + return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil +} + func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: @@ -923,6 +964,27 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } + return sa, nil + case AF_TIPC: + pp := (*RawSockaddrTIPC)(unsafe.Pointer(rsa)) + + sa := &SockaddrTIPC{ + Scope: int(pp.Scope), + } + + // Determine which union variant is present in pp.Addr by checking + // pp.Addrtype. + switch pp.Addrtype { + case TIPC_SERVICE_RANGE: + sa.Addr = (*TIPCServiceRange)(unsafe.Pointer(&pp.Addr)) + case TIPC_SERVICE_ADDR: + sa.Addr = (*TIPCServiceName)(unsafe.Pointer(&pp.Addr)) + case TIPC_SOCKET_ADDR: + sa.Addr = (*TIPCSocketAddr)(unsafe.Pointer(&pp.Addr)) + default: + return nil, EINVAL + } + return sa, nil } return nil, EAFNOSUPPORT @@ -1160,6 +1222,34 @@ func KeyctlDHCompute(params *KeyctlDHParams, buffer []byte) (size int, err error return keyctlDH(KEYCTL_DH_COMPUTE, params, buffer) } +// KeyctlRestrictKeyring implements the KEYCTL_RESTRICT_KEYRING command. This +// command limits the set of keys that can be linked to the keyring, regardless +// of keyring permissions. The command requires the "setattr" permission. +// +// When called with an empty keyType the command locks the keyring, preventing +// any further keys from being linked to the keyring. +// +// The "asymmetric" keyType defines restrictions requiring key payloads to be +// DER encoded X.509 certificates signed by keys in another keyring. Restrictions +// for "asymmetric" include "builtin_trusted", "builtin_and_secondary_trusted", +// "key_or_keyring:", and "key_or_keyring::chain". +// +// As of Linux 4.12, only the "asymmetric" keyType defines type-specific +// restrictions. 
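+// +// For example, a hypothetical call restricting a keyring to kernel-trusted +// certificates: +// +// err := unix.KeyctlRestrictKeyring(ringid, "asymmetric", "builtin_trusted")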
+// +// See the full documentation at: +// http://man7.org/linux/man-pages/man3/keyctl_restrict_keyring.3.html +// http://man7.org/linux/man-pages/man2/keyctl.2.html +func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error { + if keyType == "" { + return keyctlRestrictKeyring(KEYCTL_RESTRICT_KEYRING, ringid) + } + return keyctlRestrictKeyringByType(KEYCTL_RESTRICT_KEYRING, ringid, keyType, restriction) +} + +//sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL +//sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL + func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { var msg Msghdr var rsa RawSockaddrAny diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index e2f8cf6e5..e7fa665e6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -372,6 +372,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 87a30744d..088ce0f93 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -163,6 +163,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index f62679443..11930fc8f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -252,6 +252,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cb20b15d5..251e2d971 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -180,6 +180,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index b3b21ec1e..7562fe97b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -208,6 +208,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 
5144d4e13..a939ff8f2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -220,6 +220,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 0a100b66a..28d6d0f22 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -91,6 +91,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6230f6405..6798c2625 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -179,6 +179,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go index f81dbdc9c..eb5cb1a71 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go @@ -120,6 +120,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index b69565616..37321c12e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -107,6 +107,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 5ef309040..3e3f07507 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -18,6 +18,8 @@ import ( "unsafe" ) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -187,43 +189,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. 
-func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) { var value Ptmget err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go index 24f74e58c..24da8b524 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go index 6878bf7ff..25a0ac825 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go index dbbfcf71d..21591ecd4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go index f3434465a..804749635 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 1a074b2fe..035c043a7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -18,6 +18,8 @@ import ( "unsafe" ) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, 
newlen uintptr) (err error) = SYS___SYSCTL + // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { Len uint8 @@ -178,43 +180,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go index d62da60d1..42b5a0e51 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 9a35334cb..6ea4b4883 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go index 5d812aaea..1c3d26fa2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go @@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go index 0fb39cf5e..a8c458cb0 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go 
@@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 0153a316d..1610f551d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -553,40 +553,10 @@ func Minor(dev uint64) uint32 { //sys ioctl(fd int, req uint, arg uintptr) (err error) -func IoctlSetInt(fd int, req uint, value int) (err error) { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) (err error) { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - func IoctlSetTermio(fd int, req uint, value *Termio) (err error) { return ioctl(fd, req, uintptr(unsafe.Pointer(value))) } -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetTermio(fd int, req uint) (*Termio, error) { var value Termio err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 91c32ddf0..b22a34d7a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -18,6 +18,10 @@ func (iov *Iovec) SetLen(length int) { iov.Len = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 3fb475bcc..1875f4595 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1087,6 +1091,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 
0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1099,6 +1114,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1408,6 +1425,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1673,6 +1694,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1688,6 +1711,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -1726,6 +1750,10 @@ const ( PTRACE_SINGLEBLOCK = 0x21 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 @@ -1786,7 +1814,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1859,6 +1887,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1883,6 +1912,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1890,7 +1920,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1902,6 +1932,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1910,8 +1941,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1996,6 +2027,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2134,6 +2167,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2434,6 +2468,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 
0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2447,7 +2546,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2646,6 +2745,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2662,6 +2763,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 9c4e19f9a..4af5477da 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1087,6 +1091,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 
0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1099,6 +1114,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1408,6 +1425,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1674,6 +1695,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1689,6 +1712,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -1727,6 +1751,10 @@ const ( PTRACE_SINGLEBLOCK = 0x21 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 @@ -1787,7 +1815,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1860,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1884,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1891,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1903,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1911,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1997,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2135,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2435,6 +2469,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 
0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2448,7 +2547,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2646,6 +2745,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2662,6 +2763,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a1f038c06..eb2191994 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + 
KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1406,6 +1423,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1692,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1692,6 +1715,7 @@ const ( PTRACE_GETSIGMASK = 0x420a PTRACE_GETVFPREGS = 0x1b PTRACE_GETWMMXREGS = 0x12 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x16 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -1732,6 +1756,10 @@ const ( PTRACE_SET_SYSCALL = 0x17 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 PT_DATA_ADDR = 0x10004 PT_TEXT_ADDR = 0x10000 @@ -1793,7 +1821,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1866,6 +1894,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1890,6 +1919,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1897,7 +1927,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1909,6 +1939,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1917,8 +1948,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2003,6 +2034,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2141,6 +2174,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2441,6 +2475,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 
0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2454,7 +2553,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2652,6 +2751,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2668,6 +2769,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 504ce1389..fba8ad48e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -561,6 +564,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1089,6 +1093,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + 
KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1101,6 +1116,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1409,6 +1426,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1674,6 +1695,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1687,6 +1710,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1719,6 +1743,12 @@ const ( PTRACE_SETSIGMASK = 0x420b PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1777,7 +1807,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1850,6 +1880,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1874,6 +1905,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1881,7 +1913,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1893,6 +1925,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1901,8 +1934,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1987,6 +2020,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2125,6 +2160,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2426,6 +2462,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 
0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2439,7 +2540,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2637,6 +2738,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2653,6 +2756,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 58b642904..995a7645a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + 
KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1406,6 +1423,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1692,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1685,6 +1708,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1728,6 +1752,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1786,7 +1814,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1859,6 +1887,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1883,6 +1912,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1890,7 +1920,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1902,6 +1932,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1910,8 +1941,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1996,6 +2027,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2134,6 +2167,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2436,6 +2470,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + 
TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2449,7 +2548,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2648,6 +2747,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2664,6 +2765,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 35e33de60..2a38e1034 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 
0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1406,6 +1423,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1692,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1685,6 +1708,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1728,6 +1752,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1786,7 +1814,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1859,6 +1887,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1883,6 +1912,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1890,7 +1920,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1902,6 +1932,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1910,8 +1941,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1996,6 +2027,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2134,6 +2167,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2436,6 +2470,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + 
TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2449,7 +2548,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2648,6 +2747,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2664,6 +2765,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 574fcd8c5..d1df93831 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 
0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1406,6 +1423,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1692,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1685,6 +1708,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1728,6 +1752,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1786,7 +1814,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1859,6 +1887,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1883,6 +1912,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1890,7 +1920,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1902,6 +1932,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1910,8 +1941,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1996,6 +2027,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2134,6 +2167,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2436,6 +2470,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + 
TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2449,7 +2548,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2648,6 +2747,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2664,6 +2765,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index cdf0cf5f4..b92e3a589 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + 
KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1406,6 +1423,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1692,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1685,6 +1708,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1728,6 +1752,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1786,7 +1814,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1859,6 +1887,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1883,6 +1912,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1890,7 +1920,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1902,6 +1932,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1910,8 +1941,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1996,6 +2027,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2134,6 +2167,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2436,6 +2470,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + 
TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2449,7 +2548,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2648,6 +2747,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2664,6 +2765,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eefdb3286..72fd7995f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + 
KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1407,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80000000 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1673,6 +1694,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1692,6 +1715,7 @@ const ( PTRACE_GETVRREGS = 0x12 PTRACE_GETVSRREGS = 0x1b PTRACE_GET_DEBUGREG = 0x19 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1731,6 +1755,10 @@ const ( PTRACE_SINGLEBLOCK = 0x100 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1d PTRACE_SYSEMU_SINGLESTEP = 0x1e PTRACE_TRACEME = 0x0 @@ -1844,7 +1872,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1917,6 +1945,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1941,6 +1970,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1948,7 +1978,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1960,6 +1990,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1968,8 +1999,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2054,6 +2085,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2192,6 +2225,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2496,6 +2530,71 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + 
TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x400000 TPACKET_ALIGNMENT = 0x10 @@ -2509,7 +2608,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2707,6 +2806,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2723,6 +2824,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0xc00 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 78db21041..d9d5837eb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + 
KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1407,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80000000 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1673,6 +1694,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1692,6 +1715,7 @@ const ( PTRACE_GETVRREGS = 0x12 PTRACE_GETVSRREGS = 0x1b PTRACE_GET_DEBUGREG = 0x19 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1731,6 +1755,10 @@ const ( PTRACE_SINGLEBLOCK = 0x100 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1d PTRACE_SYSEMU_SINGLESTEP = 0x1e PTRACE_TRACEME = 0x0 @@ -1844,7 +1872,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1917,6 +1945,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1941,6 +1970,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1948,7 +1978,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1960,6 +1990,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1968,8 +1999,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2054,6 +2085,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2192,6 +2225,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2496,6 +2530,71 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + 
TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x400000 TPACKET_ALIGNMENT = 0x10 @@ -2509,7 +2608,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2707,6 +2806,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2723,6 +2824,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0xc00 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 0cd07f933..11810c85b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + 
KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1406,6 +1423,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1692,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1684,6 +1707,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1716,6 +1740,10 @@ const ( PTRACE_SETSIGMASK = 0x420b PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1774,7 +1802,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1847,6 +1875,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1871,6 +1900,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1878,7 +1908,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1890,6 +1920,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1898,8 +1929,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1984,6 +2015,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2122,6 +2155,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2422,6 +2456,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + 
TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2435,7 +2534,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2633,6 +2732,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2649,6 +2750,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index ac4f1d9f7..70090835c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1086,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + 
KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1098,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1406,6 +1423,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1673,6 +1694,8 @@ const ( PTRACE_DETACH = 0x11 PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1687,6 +1710,7 @@ const ( PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a PTRACE_GET_LAST_BREAK = 0x5006 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1730,6 +1754,10 @@ const ( PTRACE_SINGLEBLOCK = 0xc PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TE_ABORT_RAND = 0x5011 PTRACE_TRACEME = 0x0 PT_ACR0 = 0x90 @@ -1847,7 +1875,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1920,6 +1948,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1944,6 +1973,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1951,7 +1981,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1963,6 +1993,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1971,8 +2002,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2057,6 +2088,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2195,6 +2228,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2495,6 +2529,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 
0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2508,7 +2607,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2706,6 +2805,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2722,6 +2823,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 8a12f1412..99b5e165b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -256,6 +256,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -307,9 +308,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -463,6 +465,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -564,6 +567,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -1090,6 +1094,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 
0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1102,6 +1117,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1410,6 +1427,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1675,6 +1696,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1692,6 +1715,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1731,6 +1755,10 @@ const ( PTRACE_SINGLESTEP = 0x9 PTRACE_SPARC_DETACH = 0xb PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 PTRACE_WRITEDATA = 0x11 PTRACE_WRITETEXT = 0x13 @@ -1839,7 +1867,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1912,6 +1940,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1936,6 +1965,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1943,7 +1973,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1955,6 +1985,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1963,8 +1994,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2049,6 +2080,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2187,6 +2220,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x47 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2484,6 +2518,71 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x20005437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + 
TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2497,7 +2596,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2695,6 +2794,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2711,6 +2812,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 __TIOCFLUSH = 0x80047410 ) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 78ca92339..5ab9a8277 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -928,6 +907,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT 
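The TIPC_* constants introduced in each of the zerrors hunks above expose the kernel's TIPC socket options (the SOL_TIPC level) to Go callers. A minimal linux-only sketch, assuming a kernel with the tipc module loaded and relying on the SOL_TIPC constant already present in the package's linux zerrors files:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Reliable-datagram TIPC socket; this fails with EAFNOSUPPORT when
	// the kernel has no TIPC support.
	fd, err := unix.Socket(unix.AF_TIPC, unix.SOCK_RDM, 0)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer unix.Close(fd)

	// TIPC_IMPORTANCE selects the drop precedence of outgoing messages;
	// TIPC_LOW/MEDIUM/HIGH/CRITICAL_IMPORTANCE are the four levels added
	// in the hunks above.
	if err := unix.SetsockoptInt(fd, unix.SOL_TIPC, unix.TIPC_IMPORTANCE, unix.TIPC_HIGH_IMPORTANCE); err != nil {
		fmt.Println("setsockopt:", err)
	}
}

The connection-oriented options added alongside it (TIPC_CONN_TIMEOUT, TIPC_DEST_DROPPABLE, TIPC_SRC_DROPPABLE, and so on) are set the same way through SetsockoptInt.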
+func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { @@ -2326,6 +2320,27 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc___sysctl_trampoline() + +//go:linkname libc___sysctl libc___sysctl +//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index f40465ca8..c6557b135 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -106,6 +104,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 @@ -262,6 +262,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc___sysctl(SB) TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go index 2581e8960..985f33888 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 
unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,16 +361,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1691,6 +1665,32 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) sec = int64(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 64df03c45..22163ef40 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ 
-2341,6 +2320,27 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc___sysctl_trampoline() + +//go:linkname libc___sysctl libc___sysctl +//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index debcb8ed3..ad410cfbc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -264,6 +262,8 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc___sysctl(SB) TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go index f8caecef0..0e47deb78 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,16 +361,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path 
*byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index ed3306239..bbc18c4f6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -928,6 +907,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go index 3fd0f3c85..3f4cc0f34 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,16 +361,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 
0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 5258a7328..43356c832 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -928,6 +907,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index f57f48f82..96ab9877e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -106,6 +104,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c5e46e4cf..fe5d462e4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -305,6 +305,36 
@@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index da8819e48..536abcea3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 6ad9be6dd..37823cd6b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) 
{ + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index f88331782..794f61264 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 8eebc6c77..1b34b550c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ecf62a677..5714e2592 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) 
(ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 1ba0f7b6f..88a6b3362 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 20012b2f0..c09dbe345 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, 
uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 2b520deaa..42f6c2103 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index d9f044c95..de2cd8db9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9feed65eb..d51bf07fc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) 
{ // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 0a6515088..1e3a3cb73 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index e27f66930..3c97008cd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 
0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index e869c0603..7aae554f2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -429,4 +429,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 4917b8ab6..7968439a9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -351,4 +351,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index f85fcb4f8..3c663c69d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -393,4 +393,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 678a119bc..753def987 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -296,4 +296,5 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 222c9f9a2..ac86bd544 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -414,4 +414,5 @@ const ( SYS_FSCONFIG = 4431 SYS_FSMOUNT = 4432 SYS_FSPICK = 4433 + SYS_PIDFD_OPEN = 4434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 28e6d0e9d..1f5705b58 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -344,4 +344,5 @@ const ( SYS_FSCONFIG = 5431 SYS_FSMOUNT = 5432 SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index e643c6f63..d9ed95326 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -344,4 +344,5 @@ const ( SYS_FSCONFIG = 5431 SYS_FSMOUNT = 5432 SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 01d93c420..94266b65a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -414,4 +414,5 @@ const ( SYS_FSCONFIG = 4431 SYS_FSMOUNT = 4432 SYS_FSPICK = 4433 + SYS_PIDFD_OPEN = 4434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5744149eb..52e3da649 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -393,4 +393,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 21c832042..6141f90a8 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -393,4 +393,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index c1bb6d8f2..4f7261a88 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -295,4 +295,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index bc3cc6b5b..f47014ac0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -358,4 +358,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 0a2841ba8..dd78abb0d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -373,4 +373,5 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 50bc4128f..d02a18350 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -285,6 +285,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -425,6 +432,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -614,6 +622,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -664,6 +673,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2521,3 +2537,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + 
Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 055eaa76a..f347457e9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -285,6 +285,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -426,6 +433,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -615,6 +623,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -665,6 +674,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2535,3 +2551,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 66019c9cf..d53d575b8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -289,6 +289,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -429,6 +436,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -618,6 +626,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -668,6 +677,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2512,3 +2528,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + 
Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 3104798c4..aa41189bd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -616,6 +624,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +675,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2514,3 +2530,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 46c86021b..913efd616 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -288,6 +288,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -617,6 +625,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +676,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2518,3 +2534,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type 
TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index c2fe1a62a..860fb5dae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -616,6 +624,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +675,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2516,3 +2532,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index f1eb0d397..12138089a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -616,6 +624,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ 
-666,6 +675,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2516,3 +2532,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 8759bc36b..2498796fd 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -288,6 +288,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -617,6 +625,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +676,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2518,3 +2534,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index a81200541..17b83f758 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -287,6 +287,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX 
= 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -617,6 +625,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +676,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2524,3 +2540,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 74b7a9199..d289725b6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -287,6 +287,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -617,6 +625,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +676,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2524,3 +2540,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index ccea3e638..7546c1340 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -286,6 +286,13 @@ type 
RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -616,6 +624,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +675,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2542,3 +2558,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index d8fc0bc1c..8907bc74b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -285,6 +285,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -426,6 +433,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -615,6 +623,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -665,6 +674,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2538,3 +2554,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff 
--git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 5e0ab9329..5efa151eb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -289,6 +289,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -430,6 +437,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -619,6 +627,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -669,6 +678,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2519,3 +2535,58 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 7dfe201a3..308d48473 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -9,14 +9,6 @@ import ( "unsafe" ) -const ( - STANDARD_RIGHTS_REQUIRED = 0xf0000 - STANDARD_RIGHTS_READ = 0x20000 - STANDARD_RIGHTS_WRITE = 0x20000 - STANDARD_RIGHTS_EXECUTE = 0x20000 - STANDARD_RIGHTS_ALL = 0x1F0000 -) - const ( NameUnknown = 0 NameFullyQualifiedDN = 1 @@ -235,16 +227,15 @@ func LookupSID(system, account string) (sid *SID, domain string, accType uint32, } } -// String converts SID to a string format -// suitable for display, storage, or transmission. -func (sid *SID) String() (string, error) { +// String converts SID to a string format suitable for display, storage, or transmission. +func (sid *SID) String() string { var s *uint16 e := ConvertSidToStringSid(sid, &s) if e != nil { - return "", e + return "" } defer LocalFree((Handle)(unsafe.Pointer(s))) - return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil + return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(s))[:]) } // Len returns the length, in bytes, of a valid security identifier SID. @@ -660,17 +651,9 @@ type Token Handle // associated with current process. It is a real // token that needs to be closed, unlike // GetCurrentProcessToken. 
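Reviewer note: the security_windows.go hunk above changes (*SID).String from (string, error) to string, so it now satisfies fmt.Stringer and reports conversion failure as "". Any vendored caller still using the old two-value form stops compiling. A minimal sketch of the new shape, assuming a *windows.SID obtained elsewhere:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func describeSID(sid *windows.SID) {
	// String no longer returns an error; ConvertSidToStringSid failures yield "".
	s := sid.String()
	if s == "" {
		fmt.Println("SID could not be converted")
		return
	}
	fmt.Println(s)
}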
-func OpenCurrentProcessToken() (Token, error) { - p, e := GetCurrentProcess() - if e != nil { - return 0, e - } - var t Token - e = OpenProcessToken(p, TOKEN_QUERY, &t) - if e != nil { - return 0, e - } - return t, nil +func OpenCurrentProcessToken() (token Token, err error) { + err = OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY|TOKEN_DUPLICATE, &token) + return } // GetCurrentProcessToken returns the access token associated with @@ -890,3 +873,521 @@ type WTS_SESSION_INFO struct { //sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken //sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW //sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory + +type ACL struct { + aclRevision byte + sbz1 byte + aclSize uint16 + aceCount uint16 + sbz2 uint16 +} + +type SECURITY_DESCRIPTOR struct { + revision byte + sbz1 byte + control SECURITY_DESCRIPTOR_CONTROL + owner *SID + group *SID + sacl *ACL + dacl *ACL +} + +type SecurityAttributes struct { + Length uint32 + SecurityDescriptor *SECURITY_DESCRIPTOR + InheritHandle uint32 +} + +type SE_OBJECT_TYPE uint32 + +// Constants for type SE_OBJECT_TYPE +const ( + SE_UNKNOWN_OBJECT_TYPE = 0 + SE_FILE_OBJECT = 1 + SE_SERVICE = 2 + SE_PRINTER = 3 + SE_REGISTRY_KEY = 4 + SE_LMSHARE = 5 + SE_KERNEL_OBJECT = 6 + SE_WINDOW_OBJECT = 7 + SE_DS_OBJECT = 8 + SE_DS_OBJECT_ALL = 9 + SE_PROVIDER_DEFINED_OBJECT = 10 + SE_WMIGUID_OBJECT = 11 + SE_REGISTRY_WOW64_32KEY = 12 + SE_REGISTRY_WOW64_64KEY = 13 +) + +type SECURITY_INFORMATION uint32 + +// Constants for type SECURITY_INFORMATION +const ( + OWNER_SECURITY_INFORMATION = 0x00000001 + GROUP_SECURITY_INFORMATION = 0x00000002 + DACL_SECURITY_INFORMATION = 0x00000004 + SACL_SECURITY_INFORMATION = 0x00000008 + LABEL_SECURITY_INFORMATION = 0x00000010 + ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 + SCOPE_SECURITY_INFORMATION = 0x00000040 + BACKUP_SECURITY_INFORMATION = 0x00010000 + PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 + PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 + UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 + UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 +) + +type SECURITY_DESCRIPTOR_CONTROL uint16 + +// Constants for type SECURITY_DESCRIPTOR_CONTROL +const ( + SE_OWNER_DEFAULTED = 0x0001 + SE_GROUP_DEFAULTED = 0x0002 + SE_DACL_PRESENT = 0x0004 + SE_DACL_DEFAULTED = 0x0008 + SE_SACL_PRESENT = 0x0010 + SE_SACL_DEFAULTED = 0x0020 + SE_DACL_AUTO_INHERIT_REQ = 0x0100 + SE_SACL_AUTO_INHERIT_REQ = 0x0200 + SE_DACL_AUTO_INHERITED = 0x0400 + SE_SACL_AUTO_INHERITED = 0x0800 + SE_DACL_PROTECTED = 0x1000 + SE_SACL_PROTECTED = 0x2000 + SE_RM_CONTROL_VALID = 0x4000 + SE_SELF_RELATIVE = 0x8000 +) + +type ACCESS_MASK uint32 + +// Constants for type ACCESS_MASK +const ( + DELETE = 0x00010000 + READ_CONTROL = 0x00020000 + WRITE_DAC = 0x00040000 + WRITE_OWNER = 0x00080000 + SYNCHRONIZE = 0x00100000 + STANDARD_RIGHTS_REQUIRED = 0x000F0000 + STANDARD_RIGHTS_READ = READ_CONTROL + STANDARD_RIGHTS_WRITE = READ_CONTROL + STANDARD_RIGHTS_EXECUTE = READ_CONTROL + STANDARD_RIGHTS_ALL = 0x001F0000 + SPECIFIC_RIGHTS_ALL = 0x0000FFFF + ACCESS_SYSTEM_SECURITY = 0x01000000 + MAXIMUM_ALLOWED = 0x02000000 + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + GENERIC_EXECUTE = 0x20000000 + GENERIC_ALL = 0x10000000 +) + +type ACCESS_MODE uint32 + +// Constants for type ACCESS_MODE +const ( + NOT_USED_ACCESS = 0 + GRANT_ACCESS = 1 + SET_ACCESS = 2 + DENY_ACCESS = 3 + 
REVOKE_ACCESS = 4 + SET_AUDIT_SUCCESS = 5 + SET_AUDIT_FAILURE = 6 +) + +// Constants for AceFlags and Inheritance fields +const ( + NO_INHERITANCE = 0x0 + SUB_OBJECTS_ONLY_INHERIT = 0x1 + SUB_CONTAINERS_ONLY_INHERIT = 0x2 + SUB_CONTAINERS_AND_OBJECTS_INHERIT = 0x3 + INHERIT_NO_PROPAGATE = 0x4 + INHERIT_ONLY = 0x8 + INHERITED_ACCESS_ENTRY = 0x10 + INHERITED_PARENT = 0x10000000 + INHERITED_GRANDPARENT = 0x20000000 + OBJECT_INHERIT_ACE = 0x1 + CONTAINER_INHERIT_ACE = 0x2 + NO_PROPAGATE_INHERIT_ACE = 0x4 + INHERIT_ONLY_ACE = 0x8 + INHERITED_ACE = 0x10 + VALID_INHERIT_FLAGS = 0x1F +) + +type MULTIPLE_TRUSTEE_OPERATION uint32 + +// Constants for MULTIPLE_TRUSTEE_OPERATION +const ( + NO_MULTIPLE_TRUSTEE = 0 + TRUSTEE_IS_IMPERSONATE = 1 +) + +type TRUSTEE_FORM uint32 + +// Constants for TRUSTEE_FORM +const ( + TRUSTEE_IS_SID = 0 + TRUSTEE_IS_NAME = 1 + TRUSTEE_BAD_FORM = 2 + TRUSTEE_IS_OBJECTS_AND_SID = 3 + TRUSTEE_IS_OBJECTS_AND_NAME = 4 +) + +type TRUSTEE_TYPE uint32 + +// Constants for TRUSTEE_TYPE +const ( + TRUSTEE_IS_UNKNOWN = 0 + TRUSTEE_IS_USER = 1 + TRUSTEE_IS_GROUP = 2 + TRUSTEE_IS_DOMAIN = 3 + TRUSTEE_IS_ALIAS = 4 + TRUSTEE_IS_WELL_KNOWN_GROUP = 5 + TRUSTEE_IS_DELETED = 6 + TRUSTEE_IS_INVALID = 7 + TRUSTEE_IS_COMPUTER = 8 +) + +// Constants for ObjectsPresent field +const ( + ACE_OBJECT_TYPE_PRESENT = 0x1 + ACE_INHERITED_OBJECT_TYPE_PRESENT = 0x2 +) + +type EXPLICIT_ACCESS struct { + AccessPermissions ACCESS_MASK + AccessMode ACCESS_MODE + Inheritance uint32 + Trustee TRUSTEE +} + +// This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. +type TrusteeValue uintptr + +func TrusteeValueFromString(str string) TrusteeValue { + return TrusteeValue(unsafe.Pointer(StringToUTF16Ptr(str))) +} +func TrusteeValueFromSID(sid *SID) TrusteeValue { + return TrusteeValue(unsafe.Pointer(sid)) +} +func TrusteeValueFromObjectsAndSid(objectsAndSid *OBJECTS_AND_SID) TrusteeValue { + return TrusteeValue(unsafe.Pointer(objectsAndSid)) +} +func TrusteeValueFromObjectsAndName(objectsAndName *OBJECTS_AND_NAME) TrusteeValue { + return TrusteeValue(unsafe.Pointer(objectsAndName)) +} + +type TRUSTEE struct { + MultipleTrustee *TRUSTEE + MultipleTrusteeOperation MULTIPLE_TRUSTEE_OPERATION + TrusteeForm TRUSTEE_FORM + TrusteeType TRUSTEE_TYPE + TrusteeValue TrusteeValue +} + +type OBJECTS_AND_SID struct { + ObjectsPresent uint32 + ObjectTypeGuid GUID + InheritedObjectTypeGuid GUID + Sid *SID +} + +type OBJECTS_AND_NAME struct { + ObjectsPresent uint32 + ObjectType SE_OBJECT_TYPE + ObjectTypeName *uint16 + InheritedObjectTypeName *uint16 + Name *uint16 +} + +//sys getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo +//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) = advapi32.SetSecurityInfo +//sys getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW +//sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW + +//sys buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, 
countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW +//sys initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = advapi32.InitializeSecurityDescriptor + +//sys getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) = advapi32.GetSecurityDescriptorControl +//sys getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorDacl +//sys getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorSacl +//sys getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorOwner +//sys getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorGroup +//sys getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) = advapi32.GetSecurityDescriptorLength +//sys getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) [failretval!=0] = advapi32.GetSecurityDescriptorRMControl +//sys isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) = advapi32.IsValidSecurityDescriptor + +//sys setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) = advapi32.SetSecurityDescriptorControl +//sys setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorDacl +//sys setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorSacl +//sys setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) = advapi32.SetSecurityDescriptorOwner +//sys setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) = advapi32.SetSecurityDescriptorGroup +//sys setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) = advapi32.SetSecurityDescriptorRMControl + +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW + +//sys makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) = advapi32.MakeAbsoluteSD +//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD + +//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = 
advapi32.SetEntriesInAclW + +// Control returns the security descriptor control bits. +func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { + err = getSecurityDescriptorControl(sd, &control, &revision) + return +} + +// SetControl sets the security descriptor control bits. +func (sd *SECURITY_DESCRIPTOR) SetControl(controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) error { + return setSecurityDescriptorControl(sd, controlBitsOfInterest, controlBitsToSet) +} + +// RMControl returns the security descriptor resource manager control bits. +func (sd *SECURITY_DESCRIPTOR) RMControl() (control uint8, err error) { + err = getSecurityDescriptorRMControl(sd, &control) + return +} + +// SetRMControl sets the security descriptor resource manager control bits. +func (sd *SECURITY_DESCRIPTOR) SetRMControl(rmControl uint8) { + setSecurityDescriptorRMControl(sd, &rmControl) +} + +// DACL returns the security descriptor DACL and whether it was defaulted. The dacl return value may be nil +// if a DACL exists but is an "empty DACL", meaning fully permissive. If the DACL does not exist, err returns +// ERROR_OBJECT_NOT_FOUND. +func (sd *SECURITY_DESCRIPTOR) DACL() (dacl *ACL, defaulted bool, err error) { + var present bool + err = getSecurityDescriptorDacl(sd, &present, &dacl, &defaulted) + if !present { + err = ERROR_OBJECT_NOT_FOUND + } + return +} + +// SetDACL sets the absolute security descriptor DACL. +func (absoluteSD *SECURITY_DESCRIPTOR) SetDACL(dacl *ACL, present, defaulted bool) error { + return setSecurityDescriptorDacl(absoluteSD, present, dacl, defaulted) +} + +// SACL returns the security descriptor SACL and whether it was defaulted. The sacl return value may be nil +// if a SACL exists but is an "empty SACL", meaning fully permissive. If the SACL does not exist, err returns +// ERROR_OBJECT_NOT_FOUND. +func (sd *SECURITY_DESCRIPTOR) SACL() (sacl *ACL, defaulted bool, err error) { + var present bool + err = getSecurityDescriptorSacl(sd, &present, &sacl, &defaulted) + if !present { + err = ERROR_OBJECT_NOT_FOUND + } + return +} + +// SetSACL sets the absolute security descriptor SACL. +func (absoluteSD *SECURITY_DESCRIPTOR) SetSACL(sacl *ACL, present, defaulted bool) error { + return setSecurityDescriptorSacl(absoluteSD, present, sacl, defaulted) +} + +// Owner returns the security descriptor owner and whether it was defaulted. +func (sd *SECURITY_DESCRIPTOR) Owner() (owner *SID, defaulted bool, err error) { + err = getSecurityDescriptorOwner(sd, &owner, &defaulted) + return +} + +// SetOwner sets the absolute security descriptor owner. +func (absoluteSD *SECURITY_DESCRIPTOR) SetOwner(owner *SID, defaulted bool) error { + return setSecurityDescriptorOwner(absoluteSD, owner, defaulted) +} + +// Group returns the security descriptor group and whether it was defaulted. +func (sd *SECURITY_DESCRIPTOR) Group() (group *SID, defaulted bool, err error) { + err = getSecurityDescriptorGroup(sd, &group, &defaulted) + return +} + +// SetGroup sets the absolute security descriptor owner. +func (absoluteSD *SECURITY_DESCRIPTOR) SetGroup(group *SID, defaulted bool) error { + return setSecurityDescriptorGroup(absoluteSD, group, defaulted) +} + +// Length returns the length of the security descriptor. +func (sd *SECURITY_DESCRIPTOR) Length() uint32 { + return getSecurityDescriptorLength(sd) +} + +// IsValid returns whether the security descriptor is valid. 
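Reviewer note: the accessor methods added above surface a missing DACL as ERROR_OBJECT_NOT_FOUND rather than as a nil result. A hedged sketch of consuming them, assuming a self-relative *windows.SECURITY_DESCRIPTOR obtained from one of the Get*SecurityInfo helpers later in this hunk:

package main

import "golang.org/x/sys/windows"

func inspectDACL(sd *windows.SECURITY_DESCRIPTOR) {
	dacl, defaulted, err := sd.DACL()
	switch {
	case err == windows.ERROR_OBJECT_NOT_FOUND:
		// The descriptor carries no DACL at all.
	case err != nil:
		// getSecurityDescriptorDacl itself failed.
	case dacl == nil:
		// DACL present but nil: the "empty DACL" case the doc comment above describes.
	default:
		_ = defaulted // true when the DACL came from a defaulting mechanism
	}
}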
+func (sd *SECURITY_DESCRIPTOR) IsValid() bool { + return isValidSecurityDescriptor(sd) +} + +// String returns the SDDL form of the security descriptor, with a function signature that can be +// used with %v formatting directives. +func (sd *SECURITY_DESCRIPTOR) String() string { + var sddl *uint16 + err := convertSecurityDescriptorToStringSecurityDescriptor(sd, 1, 0xff, &sddl, nil) + if err != nil { + return "" + } + defer LocalFree(Handle(unsafe.Pointer(sddl))) + return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(sddl))[:]) +} + +// ToAbsolute converts a self-relative security descriptor into an absolute one. +func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DESCRIPTOR, err error) { + control, _, err := selfRelativeSD.Control() + if err != nil { + return + } + if control&SE_SELF_RELATIVE == 0 { + err = ERROR_INVALID_PARAMETER + return + } + var absoluteSDSize, daclSize, saclSize, ownerSize, groupSize uint32 + err = makeAbsoluteSD(selfRelativeSD, nil, &absoluteSDSize, + nil, &daclSize, nil, &saclSize, nil, &ownerSize, nil, &groupSize) + switch err { + case ERROR_INSUFFICIENT_BUFFER: + case nil: + // makeAbsoluteSD is expected to fail, but it succeeds. + return nil, ERROR_INTERNAL_ERROR + default: + return nil, err + } + if absoluteSDSize > 0 { + absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) + } + var ( + dacl *ACL + sacl *ACL + owner *SID + group *SID + ) + if daclSize > 0 { + dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) + } + if saclSize > 0 { + sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) + } + if ownerSize > 0 { + owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) + } + if groupSize > 0 { + group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) + } + err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, + dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) + return +} + +// ToSelfRelative converts an absolute security descriptor into a self-relative one. +func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURITY_DESCRIPTOR, err error) { + control, _, err := absoluteSD.Control() + if err != nil { + return + } + if control&SE_SELF_RELATIVE != 0 { + err = ERROR_INVALID_PARAMETER + return + } + var selfRelativeSDSize uint32 + err = makeSelfRelativeSD(absoluteSD, nil, &selfRelativeSDSize) + switch err { + case ERROR_INSUFFICIENT_BUFFER: + case nil: + // makeSelfRelativeSD is expected to fail, but it succeeds. + return nil, ERROR_INTERNAL_ERROR + default: + return nil, err + } + if selfRelativeSDSize > 0 { + selfRelativeSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, selfRelativeSDSize)[0])) + } + err = makeSelfRelativeSD(absoluteSD, selfRelativeSD, &selfRelativeSDSize) + return +} + +func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { + sdBytes := make([]byte, selfRelativeSD.Length()) + copy(sdBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(selfRelativeSD))[:len(sdBytes)]) + return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&sdBytes[0])) +} + +// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a +// self-relative security descriptor object allocated on the Go heap. 
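Reviewer note: SecurityDescriptorFromString (just below) pairs with the String method above for an SDDL round-trip; the copySelfRelativeSecurityDescriptor helper copies the result onto the Go heap so the deferred LocalFree can release the Windows-heap original. A short sketch, where the SDDL literal is illustrative only:

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func sddlRoundTrip() error {
	// Illustrative SDDL: a protected DACL granting Everyone (WD) generic-all access.
	sd, err := windows.SecurityDescriptorFromString("D:P(A;;GA;;;WD)")
	if err != nil {
		return err
	}
	fmt.Println(sd.String()) // back to SDDL; "" if the conversion fails
	return nil
}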
+func SecurityDescriptorFromString(sddl string) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &winHeapSD, nil) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// GetSecurityInfo queries the security information for a given handle and returns the self-relative security +// descriptor result on the Go heap. +func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = getSecurityInfo(handle, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security +// descriptor result on the Go heap. +func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// BuildSecurityDescriptor makes a new security descriptor using the input trustees, explicit access lists, and +// prior security descriptor to be merged, any of which can be nil, returning the self-relative security descriptor +// result on the Go heap. +func BuildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, accessEntries []EXPLICIT_ACCESS, auditEntries []EXPLICIT_ACCESS, mergedSecurityDescriptor *SECURITY_DESCRIPTOR) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + var winHeapSDSize uint32 + var firstAccessEntry *EXPLICIT_ACCESS + if len(accessEntries) > 0 { + firstAccessEntry = &accessEntries[0] + } + var firstAuditEntry *EXPLICIT_ACCESS + if len(auditEntries) > 0 { + firstAuditEntry = &auditEntries[0] + } + err = buildSecurityDescriptor(owner, group, uint32(len(accessEntries)), firstAccessEntry, uint32(len(auditEntries)), firstAuditEntry, mergedSecurityDescriptor, &winHeapSDSize, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// NewSecurityDescriptor creates and initializes a new absolute security descriptor. +func NewSecurityDescriptor() (absoluteSD *SECURITY_DESCRIPTOR, err error) { + absoluteSD = &SECURITY_DESCRIPTOR{} + err = initializeSecurityDescriptor(absoluteSD, 1) + return +} + +// ACLFromEntries returns a new ACL on the Go heap containing a list of explicit entries as well as those of another ACL. +// Both explicitEntries and mergedACL are optional and can be nil. 
+// ACLFromEntries returns a new ACL on the Go heap containing a list of explicit entries as well as those of another ACL.
+// Both explicitEntries and mergedACL are optional and can be nil.
+func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL, err error) {
+	var firstExplicitEntry *EXPLICIT_ACCESS
+	if len(explicitEntries) > 0 {
+		firstExplicitEntry = &explicitEntries[0]
+	}
+	var winHeapACL *ACL
+	err = setEntriesInAcl(uint32(len(explicitEntries)), firstExplicitEntry, mergedACL, &winHeapACL)
+	if err != nil {
+		return
+	}
+	defer LocalFree(Handle(unsafe.Pointer(winHeapACL)))
+	aclBytes := make([]byte, winHeapACL.aclSize)
+	copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes)])
+	return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil
+}
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 33e7d45ab..47bde878d 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -178,8 +178,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys	TerminateProcess(handle Handle, exitcode uint32) (err error)
 //sys	GetExitCodeProcess(handle Handle, exitcode *uint32) (err error)
 //sys	GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW
-//sys	GetCurrentProcess() (pseudoHandle Handle, err error)
-//sys	GetCurrentThread() (pseudoHandle Handle, err error)
+//sys	GetCurrentProcess() (pseudoHandle Handle)
+//sys	GetCurrentThread() (pseudoHandle Handle)
 //sys	GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error)
 //sys	DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error)
 //sys	WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff]
@@ -257,6 +257,10 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys	SetEvent(event Handle) (err error) = kernel32.SetEvent
 //sys	ResetEvent(event Handle) (err error) = kernel32.ResetEvent
 //sys	PulseEvent(event Handle) (err error) = kernel32.PulseEvent
+//sys	CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) = kernel32.CreateMutexW
+//sys	CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateMutexExW
+//sys	OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW
+//sys	ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex
 //sys	SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx
 //sys	CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW
 //sys	AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject
@@ -292,6 +296,10 @@ func NewCallbackCDecl(fn interface{}) uintptr {
 //sys	SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW
 //sys	SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW
 //sys	MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW
+//sys	ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx
+//sys	InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) = advapi32.InitiateSystemShutdownExW
+//sys	SetProcessShutdownParameters(level uint32, flags uint32) (err error) = kernel32.SetProcessShutdownParameters
+//sys	GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) = kernel32.GetProcessShutdownParameters
 //sys	clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString
 //sys	stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2
 //sys	coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid
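[Editor's note: the new mutex bindings support the classic cross-process serialization pattern. A hedged sketch, assuming the vendored golang.org/x/sys/windows package; the mutex name is arbitrary. Note that with the generated wrappers further below, CreateMutex only reports an error when the returned handle is zero, so an already-existing mutex is simply opened rather than surfaced as ERROR_ALREADY_EXISTS.]

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	name, err := windows.UTF16PtrFromString(`Global\example-lock`)
	if err != nil {
		panic(err)
	}

	// Creates the named mutex, or opens the existing one if another
	// process got there first.
	mu, err := windows.CreateMutex(nil, false, name)
	if err != nil {
		panic(err)
	}
	defer windows.CloseHandle(mu)

	// Serialize a cross-process critical section around the mutex.
	ev, err := windows.WaitForSingleObject(mu, windows.INFINITE)
	if err != nil || ev != windows.WAIT_OBJECT_0 {
		panic(fmt.Sprintf("wait failed: event=%#x err=%v", ev, err))
	}
	fmt.Println("holding the lock")

	if err := windows.ReleaseMutex(mu); err != nil {
		panic(err)
	}
}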
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 1e3947f0f..a54823414 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -62,11 +62,6 @@ var signals = [...]string{
 }
 
 const (
-	GENERIC_READ    = 0x80000000
-	GENERIC_WRITE   = 0x40000000
-	GENERIC_EXECUTE = 0x20000000
-	GENERIC_ALL     = 0x10000000
-
 	FILE_LIST_DIRECTORY   = 0x00000001
 	FILE_APPEND_DATA      = 0x00000004
 	FILE_WRITE_ATTRIBUTES = 0x00000100
@@ -158,13 +153,6 @@ const (
 	WAIT_OBJECT_0 = 0x00000000
 	WAIT_FAILED   = 0xFFFFFFFF
 
-	// Standard access rights.
-	DELETE       = 0x00010000
-	READ_CONTROL = 0x00020000
-	SYNCHRONIZE  = 0x00100000
-	WRITE_DAC    = 0x00040000
-	WRITE_OWNER  = 0x00080000
-
 	// Access rights for process.
 	PROCESS_CREATE_PROCESS = 0x0080
 	PROCESS_CREATE_THREAD  = 0x0002
@@ -483,12 +471,6 @@ func NsecToTimeval(nsec int64) (tv Timeval) {
 	return
 }
 
-type SecurityAttributes struct {
-	Length             uint32
-	SecurityDescriptor uintptr
-	InheritHandle      uint32
-}
-
 type Overlapped struct {
 	Internal     uintptr
 	InternalHigh uintptr
@@ -1190,6 +1172,28 @@ const (
 	REG_QWORD = REG_QWORD_LITTLE_ENDIAN
 )
 
+const (
+	EVENT_MODIFY_STATE = 0x0002
+	EVENT_ALL_ACCESS   = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3
+
+	MUTANT_QUERY_STATE = 0x0001
+	MUTANT_ALL_ACCESS  = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE
+
+	SEMAPHORE_MODIFY_STATE = 0x0002
+	SEMAPHORE_ALL_ACCESS   = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3
+
+	TIMER_QUERY_STATE  = 0x0001
+	TIMER_MODIFY_STATE = 0x0002
+	TIMER_ALL_ACCESS   = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | TIMER_QUERY_STATE | TIMER_MODIFY_STATE
+
+	MUTEX_MODIFY_STATE = MUTANT_QUERY_STATE
+	MUTEX_ALL_ACCESS   = MUTANT_ALL_ACCESS
+
+	CREATE_EVENT_MANUAL_RESET  = 0x1
+	CREATE_EVENT_INITIAL_SET   = 0x2
+	CREATE_MUTEX_INITIAL_OWNER = 0x1
+)
+
 type AddrinfoW struct {
 	Flags     int32
 	Family    int32
@@ -1666,3 +1670,68 @@ type OsVersionInfoEx struct {
 	ProductType byte
 	_           byte
 }
+
+const (
+	EWX_LOGOFF          = 0x00000000
+	EWX_SHUTDOWN        = 0x00000001
+	EWX_REBOOT          = 0x00000002
+	EWX_FORCE           = 0x00000004
+	EWX_POWEROFF        = 0x00000008
+	EWX_FORCEIFHUNG     = 0x00000010
+	EWX_QUICKRESOLVE    = 0x00000020
+	EWX_RESTARTAPPS     = 0x00000040
+	EWX_HYBRID_SHUTDOWN = 0x00400000
+	EWX_BOOTOPTIONS     = 0x01000000
+
+	SHTDN_REASON_FLAG_COMMENT_REQUIRED          = 0x01000000
+	SHTDN_REASON_FLAG_DIRTY_PROBLEM_ID_REQUIRED = 0x02000000
+	SHTDN_REASON_FLAG_CLEAN_UI                  = 0x04000000
+	SHTDN_REASON_FLAG_DIRTY_UI                  = 0x08000000
+	SHTDN_REASON_FLAG_USER_DEFINED              = 0x40000000
+	SHTDN_REASON_FLAG_PLANNED                   = 0x80000000
+	SHTDN_REASON_MAJOR_OTHER                    = 0x00000000
+	SHTDN_REASON_MAJOR_NONE                     = 0x00000000
+	SHTDN_REASON_MAJOR_HARDWARE                 = 0x00010000
+	SHTDN_REASON_MAJOR_OPERATINGSYSTEM          = 0x00020000
+	SHTDN_REASON_MAJOR_SOFTWARE                 = 0x00030000
+	SHTDN_REASON_MAJOR_APPLICATION              = 0x00040000
+	SHTDN_REASON_MAJOR_SYSTEM                   = 0x00050000
+	SHTDN_REASON_MAJOR_POWER                    = 0x00060000
+	SHTDN_REASON_MAJOR_LEGACY_API               = 0x00070000
+	SHTDN_REASON_MINOR_OTHER                    = 0x00000000
+	SHTDN_REASON_MINOR_NONE                     = 0x000000ff
+	SHTDN_REASON_MINOR_MAINTENANCE              = 0x00000001
+	SHTDN_REASON_MINOR_INSTALLATION             = 0x00000002
+	SHTDN_REASON_MINOR_UPGRADE                  = 0x00000003
+	SHTDN_REASON_MINOR_RECONFIG                 = 0x00000004
+	SHTDN_REASON_MINOR_HUNG                     = 0x00000005
+	SHTDN_REASON_MINOR_UNSTABLE                 = 0x00000006
+	SHTDN_REASON_MINOR_DISK                     = 0x00000007
+	SHTDN_REASON_MINOR_PROCESSOR                = 0x00000008
+	SHTDN_REASON_MINOR_NETWORKCARD              = 0x00000009
+	SHTDN_REASON_MINOR_POWER_SUPPLY             = 0x0000000a
+	SHTDN_REASON_MINOR_CORDUNPLUGGED            = 0x0000000b
+	SHTDN_REASON_MINOR_ENVIRONMENT              = 0x0000000c
+	SHTDN_REASON_MINOR_HARDWARE_DRIVER          = 0x0000000d
+	SHTDN_REASON_MINOR_OTHERDRIVER              = 0x0000000e
+	SHTDN_REASON_MINOR_BLUESCREEN               = 0x0000000F
+	SHTDN_REASON_MINOR_SERVICEPACK              = 0x00000010
+	SHTDN_REASON_MINOR_HOTFIX                   = 0x00000011
+	SHTDN_REASON_MINOR_SECURITYFIX              = 0x00000012
+	SHTDN_REASON_MINOR_SECURITY                 = 0x00000013
+	SHTDN_REASON_MINOR_NETWORK_CONNECTIVITY     = 0x00000014
+	SHTDN_REASON_MINOR_WMI                      = 0x00000015
+	SHTDN_REASON_MINOR_SERVICEPACK_UNINSTALL    = 0x00000016
+	SHTDN_REASON_MINOR_HOTFIX_UNINSTALL         = 0x00000017
+	SHTDN_REASON_MINOR_SECURITYFIX_UNINSTALL    = 0x00000018
+	SHTDN_REASON_MINOR_MMC                      = 0x00000019
+	SHTDN_REASON_MINOR_SYSTEMRESTORE            = 0x0000001a
+	SHTDN_REASON_MINOR_TERMSRV                  = 0x00000020
+	SHTDN_REASON_MINOR_DC_PROMOTION             = 0x00000021
+	SHTDN_REASON_MINOR_DC_DEMOTION              = 0x00000022
+	SHTDN_REASON_UNKNOWN                        = SHTDN_REASON_MINOR_NONE
+	SHTDN_REASON_LEGACY_API                     = SHTDN_REASON_MAJOR_LEGACY_API | SHTDN_REASON_FLAG_PLANNED
+	SHTDN_REASON_VALID_BIT_MASK                 = 0xc0ffffff
+
+	SHUTDOWN_NORETRY = 0x1
+)
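[Editor's note: to show how the EWX_* and SHTDN_* values combine, a sketch of a planned reboot, assuming the vendored golang.org/x/sys/windows package. The 0x100 shutdown level is purely illustrative, and the calls are destructive: the last one really restarts the machine and fails unless SeShutdownPrivilege is already enabled on the process token.]

package main

import "golang.org/x/sys/windows"

func main() {
	// Compose a reason code from the constants above: a planned software upgrade.
	reason := uint32(windows.SHTDN_REASON_MAJOR_SOFTWARE |
		windows.SHTDN_REASON_MINOR_UPGRADE |
		windows.SHTDN_REASON_FLAG_PLANNED)

	// Ask the system to notify this process late in the shutdown sequence;
	// 0x100 is the lowest application-settable level (shut down last).
	if err := windows.SetProcessShutdownParameters(0x100, 0); err != nil {
		panic(err)
	}

	// Reboot, forcing hung applications to close.
	if err := windows.ExitWindowsEx(windows.EWX_REBOOT|windows.EWX_FORCEIFHUNG, reason); err != nil {
		panic(err)
	}
}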
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 1c275cb93..d0b74bfcb 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -51,265 +51,298 @@ var (
 	modnetapi32 = NewLazySystemDLL("netapi32.dll")
 	modwtsapi32 = NewLazySystemDLL("wtsapi32.dll")
 
-	procRegisterEventSourceW               = modadvapi32.NewProc("RegisterEventSourceW")
-	procDeregisterEventSource              = modadvapi32.NewProc("DeregisterEventSource")
-	procReportEventW                       = modadvapi32.NewProc("ReportEventW")
-	procOpenSCManagerW                     = modadvapi32.NewProc("OpenSCManagerW")
-	procCloseServiceHandle                 = modadvapi32.NewProc("CloseServiceHandle")
-	procCreateServiceW                     = modadvapi32.NewProc("CreateServiceW")
-	procOpenServiceW                       = modadvapi32.NewProc("OpenServiceW")
-	procDeleteService                      = modadvapi32.NewProc("DeleteService")
-	procStartServiceW                      = modadvapi32.NewProc("StartServiceW")
-	procQueryServiceStatus                 = modadvapi32.NewProc("QueryServiceStatus")
-	procQueryServiceLockStatusW            = modadvapi32.NewProc("QueryServiceLockStatusW")
-	procControlService                     = modadvapi32.NewProc("ControlService")
-	procStartServiceCtrlDispatcherW        = modadvapi32.NewProc("StartServiceCtrlDispatcherW")
-	procSetServiceStatus                   = modadvapi32.NewProc("SetServiceStatus")
-	procChangeServiceConfigW               = modadvapi32.NewProc("ChangeServiceConfigW")
-	procQueryServiceConfigW                = modadvapi32.NewProc("QueryServiceConfigW")
-	procChangeServiceConfig2W              = modadvapi32.NewProc("ChangeServiceConfig2W")
-	procQueryServiceConfig2W               = modadvapi32.NewProc("QueryServiceConfig2W")
-	procEnumServicesStatusExW              = modadvapi32.NewProc("EnumServicesStatusExW")
-	procQueryServiceStatusEx               = modadvapi32.NewProc("QueryServiceStatusEx")
-	procNotifyServiceStatusChangeW         = modadvapi32.NewProc("NotifyServiceStatusChangeW")
-	procGetLastError                       = modkernel32.NewProc("GetLastError")
-	procLoadLibraryW                       = modkernel32.NewProc("LoadLibraryW")
-	procLoadLibraryExW                     = 
modkernel32.NewProc("LoadLibraryExW") - procFreeLibrary = modkernel32.NewProc("FreeLibrary") - procGetProcAddress = modkernel32.NewProc("GetProcAddress") - procGetVersion = modkernel32.NewProc("GetVersion") - procFormatMessageW = modkernel32.NewProc("FormatMessageW") - procExitProcess = modkernel32.NewProc("ExitProcess") - procIsWow64Process = modkernel32.NewProc("IsWow64Process") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procReadFile = modkernel32.NewProc("ReadFile") - procWriteFile = modkernel32.NewProc("WriteFile") - procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") - procSetFilePointer = modkernel32.NewProc("SetFilePointer") - procCloseHandle = modkernel32.NewProc("CloseHandle") - procGetStdHandle = modkernel32.NewProc("GetStdHandle") - procSetStdHandle = modkernel32.NewProc("SetStdHandle") - procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") - procFindNextFileW = modkernel32.NewProc("FindNextFileW") - procFindClose = modkernel32.NewProc("FindClose") - procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") - procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") - procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") - procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") - procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") - procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") - procDeleteFileW = modkernel32.NewProc("DeleteFileW") - procMoveFileW = modkernel32.NewProc("MoveFileW") - procMoveFileExW = modkernel32.NewProc("MoveFileExW") - procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") - procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") - procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") - procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") - procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") - procCancelIo = modkernel32.NewProc("CancelIo") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procCreateProcessW = modkernel32.NewProc("CreateProcessW") - procOpenProcess = modkernel32.NewProc("OpenProcess") - procShellExecuteW = modshell32.NewProc("ShellExecuteW") - procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") - procTerminateProcess = modkernel32.NewProc("TerminateProcess") - procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") - procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") - procGetCurrentProcess = modkernel32.NewProc("GetCurrentProcess") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") - procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") - procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") - procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") - procGetTempPathW = modkernel32.NewProc("GetTempPathW") - procCreatePipe = modkernel32.NewProc("CreatePipe") - procGetFileType = modkernel32.NewProc("GetFileType") - procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") - procCryptReleaseContext = 
modadvapi32.NewProc("CryptReleaseContext") - procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") - procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") - procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") - procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") - procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") - procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") - procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") - procGetTickCount64 = modkernel32.NewProc("GetTickCount64") - procSetFileTime = modkernel32.NewProc("SetFileTime") - procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") - procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") - procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") - procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") - procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") - procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") - procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") - procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") - procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") - procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") - procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") - procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") - procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") - procVirtualLock = modkernel32.NewProc("VirtualLock") - procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") - procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") - procVirtualFree = modkernel32.NewProc("VirtualFree") - procVirtualProtect = modkernel32.NewProc("VirtualProtect") - procTransmitFile = modmswsock.NewProc("TransmitFile") - procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") - procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") - procCertOpenStore = modcrypt32.NewProc("CertOpenStore") - procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") - procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") - procCertCloseStore = modcrypt32.NewProc("CertCloseStore") - procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") - procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") - procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") - procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") - procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") - procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") - procRegCloseKey = modadvapi32.NewProc("RegCloseKey") - procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") - procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") - procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") - procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") - procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") - procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") - procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") - procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") - procReadConsoleW = 
modkernel32.NewProc("ReadConsoleW") - procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") - procProcess32FirstW = modkernel32.NewProc("Process32FirstW") - procProcess32NextW = modkernel32.NewProc("Process32NextW") - procThread32First = modkernel32.NewProc("Thread32First") - procThread32Next = modkernel32.NewProc("Thread32Next") - procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") - procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") - procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") - procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") - procCreateEventW = modkernel32.NewProc("CreateEventW") - procCreateEventExW = modkernel32.NewProc("CreateEventExW") - procOpenEventW = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") - procSleepEx = modkernel32.NewProc("SleepEx") - procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") - procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") - procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") - procSetErrorMode = modkernel32.NewProc("SetErrorMode") - procResumeThread = modkernel32.NewProc("ResumeThread") - procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") - procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") - procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") - procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") - procGetProcessId = modkernel32.NewProc("GetProcessId") - procOpenThread = modkernel32.NewProc("OpenThread") - procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") - procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") - procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") - procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") - procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") - procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") - procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") - procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") - procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") - procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") - procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") - procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") - procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") - procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") - procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") - procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") - procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") - procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") - procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") - procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") - procMessageBoxW = moduser32.NewProc("MessageBoxW") - procCLSIDFromString = modole32.NewProc("CLSIDFromString") - procStringFromGUID2 = modole32.NewProc("StringFromGUID2") - procCoCreateGuid = modole32.NewProc("CoCreateGuid") - procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") - procRtlGetVersion = 
modntdll.NewProc("RtlGetVersion") - procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") - procWSAStartup = modws2_32.NewProc("WSAStartup") - procWSACleanup = modws2_32.NewProc("WSACleanup") - procWSAIoctl = modws2_32.NewProc("WSAIoctl") - procsocket = modws2_32.NewProc("socket") - procsetsockopt = modws2_32.NewProc("setsockopt") - procgetsockopt = modws2_32.NewProc("getsockopt") - procbind = modws2_32.NewProc("bind") - procconnect = modws2_32.NewProc("connect") - procgetsockname = modws2_32.NewProc("getsockname") - procgetpeername = modws2_32.NewProc("getpeername") - proclisten = modws2_32.NewProc("listen") - procshutdown = modws2_32.NewProc("shutdown") - procclosesocket = modws2_32.NewProc("closesocket") - procAcceptEx = modmswsock.NewProc("AcceptEx") - procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") - procWSARecv = modws2_32.NewProc("WSARecv") - procWSASend = modws2_32.NewProc("WSASend") - procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") - procWSASendTo = modws2_32.NewProc("WSASendTo") - procgethostbyname = modws2_32.NewProc("gethostbyname") - procgetservbyname = modws2_32.NewProc("getservbyname") - procntohs = modws2_32.NewProc("ntohs") - procgetprotobyname = modws2_32.NewProc("getprotobyname") - procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") - procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") - procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") - procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") - procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") - procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") - procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") - procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") - procGetACP = modkernel32.NewProc("GetACP") - procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") - procTranslateNameW = modsecur32.NewProc("TranslateNameW") - procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") - procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") - procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") - procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procGetLengthSid = modadvapi32.NewProc("GetLengthSid") - procCopySid = modadvapi32.NewProc("CopySid") - procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") - procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") - procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") - procFreeSid = modadvapi32.NewProc("FreeSid") - procEqualSid = modadvapi32.NewProc("EqualSid") - procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") - procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") - procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") - procIsValidSid = modadvapi32.NewProc("IsValidSid") - procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") - procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procImpersonateSelf = 
modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procSetThreadToken = modadvapi32.NewProc("SetThreadToken") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") - procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") - procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") - procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") - procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") - procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") - procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") - procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") - procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") - procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") - procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procReportEventW = modadvapi32.NewProc("ReportEventW") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procCreateServiceW = modadvapi32.NewProc("CreateServiceW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procDeleteService = modadvapi32.NewProc("DeleteService") + procStartServiceW = modadvapi32.NewProc("StartServiceW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") + procControlService = modadvapi32.NewProc("ControlService") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") + procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") + procGetLastError = modkernel32.NewProc("GetLastError") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") + procGetVersion = modkernel32.NewProc("GetVersion") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") + procExitProcess = modkernel32.NewProc("ExitProcess") + procIsWow64Process = modkernel32.NewProc("IsWow64Process") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procReadFile = modkernel32.NewProc("ReadFile") + procWriteFile = modkernel32.NewProc("WriteFile") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") + procFindFirstFileW = 
modkernel32.NewProc("FindFirstFileW") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") + procFindClose = modkernel32.NewProc("FindClose") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") + procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") + procGetCurrentProcess = modkernel32.NewProc("GetCurrentProcess") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procCreatePipe = modkernel32.NewProc("CreatePipe") + procGetFileType = modkernel32.NewProc("GetFileType") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procGetFileAttributesExW = 
modkernel32.NewProc("GetFileAttributesExW") + procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procTransmitFile = modmswsock.NewProc("TransmitFile") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") + procCertCloseStore = modcrypt32.NewProc("CertCloseStore") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") + procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procReadConsoleW = modkernel32.NewProc("ReadConsoleW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = modkernel32.NewProc("Thread32Next") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procCreateEventExW = modkernel32.NewProc("CreateEventExW") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = 
modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") + procCreateMutexW = modkernel32.NewProc("CreateMutexW") + procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") + procOpenMutexW = modkernel32.NewProc("OpenMutexW") + procReleaseMutex = modkernel32.NewProc("ReleaseMutex") + procSleepEx = modkernel32.NewProc("SleepEx") + procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procResumeThread = modkernel32.NewProc("ResumeThread") + procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") + procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") + procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetProcessId = modkernel32.NewProc("GetProcessId") + procOpenThread = modkernel32.NewProc("OpenThread") + procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") + procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") + procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") + procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") + procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") + procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") + procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") + procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procMessageBoxW = moduser32.NewProc("MessageBoxW") + procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") + procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters") + procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") + procCoCreateGuid = modole32.NewProc("CoCreateGuid") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") + procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procsocket = modws2_32.NewProc("socket") + procsetsockopt = 
modws2_32.NewProc("setsockopt") + procgetsockopt = modws2_32.NewProc("getsockopt") + procbind = modws2_32.NewProc("bind") + procconnect = modws2_32.NewProc("connect") + procgetsockname = modws2_32.NewProc("getsockname") + procgetpeername = modws2_32.NewProc("getpeername") + proclisten = modws2_32.NewProc("listen") + procshutdown = modws2_32.NewProc("shutdown") + procclosesocket = modws2_32.NewProc("closesocket") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procWSARecv = modws2_32.NewProc("WSARecv") + procWSASend = modws2_32.NewProc("WSASend") + procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASendTo = modws2_32.NewProc("WSASendTo") + procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetservbyname = modws2_32.NewProc("getservbyname") + procntohs = modws2_32.NewProc("ntohs") + procgetprotobyname = modws2_32.NewProc("getprotobyname") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetACP = modkernel32.NewProc("GetACP") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") + procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procCopySid = modadvapi32.NewProc("CopySid") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") + procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procEqualSid = modadvapi32.NewProc("EqualSid") + procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") + procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") + procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") + procIsValidSid = modadvapi32.NewProc("IsValidSid") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") + procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procSetThreadToken = modadvapi32.NewProc("SetThreadToken") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + 
procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") + procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") + procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") + procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") + procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") + procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") + procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") + procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") + procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") + procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") + procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") + procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") + procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") + procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") + procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") + procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") + procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") + procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") + procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") + procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") + procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") + procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") ) func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { @@ -686,7 +719,14 @@ func ExitProcess(exitcode uint32) { } func IsWow64Process(handle Handle, isWow64 *bool) (err error) { - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(isWow64)), 0) + var _p0 uint32 + if *isWow64 { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + *isWow64 = _p0 != 0 if r1 == 0 { if e1 != 0 { err = errnoErr(e1) @@ -1169,29 +1209,15 @@ func GetStartupInfo(startupInfo 
*StartupInfo) (err error) { return } -func GetCurrentProcess() (pseudoHandle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentProcess.Addr(), 0, 0, 0, 0) +func GetCurrentProcess() (pseudoHandle Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentProcess.Addr(), 0, 0, 0, 0) pseudoHandle = Handle(r0) - if pseudoHandle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } return } -func GetCurrentThread() (pseudoHandle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) +func GetCurrentThread() (pseudoHandle Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) pseudoHandle = Handle(r0) - if pseudoHandle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } return } @@ -2109,6 +2135,69 @@ func PulseEvent(event Handle) (err error) { return } +func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if initialOwner { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReleaseMutex(mutex Handle) (err error) { + r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { var _p0 uint32 if alertable { @@ -2517,6 +2606,66 @@ func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret return } +func ExitWindowsEx(flags uint32, reason uint32) (err error) { + r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { + var _p0 uint32 + if forceAppsClosed { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if rebootAfterShutdown { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } 
+ return +} + +func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) if r0 != 0 { @@ -3388,3 +3537,358 @@ func WTSFreeMemory(ptr uintptr) { syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) return } + +func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) { + syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + return +} + +func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) +} + +func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) +} + +func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, 
uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { + r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { + var _p0 uint32 + if *daclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if *daclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *daclPresent = _p0 != 0 + *daclDefaulted = _p1 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) { + var _p0 uint32 + if *saclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if *saclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *saclPresent = _p0 != 0 + *saclDefaulted = _p1 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { + var _p0 uint32 + if *ownerDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + *ownerDefaulted = _p0 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } 
+ return +} + +func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { + var _p0 uint32 + if *groupDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + *groupDefaulted = _p0 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + len = uint32(r0) + return +} + +func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + isValid = r0 != 0 + return +} + +func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { + var _p0 uint32 + if daclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if daclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { + var _p0 uint32 + if saclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if saclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { + var _p0 uint32 + if ownerDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { + var _p0 uint32 + if groupDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + 
} else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { + syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} diff --git a/vendor/golang.org/x/tools/AUTHORS b/vendor/golang.org/x/tools/AUTHORS new file mode 100644 index 000000000..15167cd74 --- /dev/null +++ b/vendor/golang.org/x/tools/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. 
+# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/tools/CONTRIBUTORS b/vendor/golang.org/x/tools/CONTRIBUTORS new file mode 100644 index 000000000..1c4577e96 --- /dev/null +++ b/vendor/golang.org/x/tools/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 000000000..6a66aea5e --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 000000000..733099041 --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go new file mode 100644 index 000000000..6b7052b89 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -0,0 +1,627 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +// This file defines utilities for working with source positions. + +import ( + "fmt" + "go/ast" + "go/token" + "sort" +) + +// PathEnclosingInterval returns the node that encloses the source +// interval [start, end), and all its ancestors up to the AST root. +// +// The definition of "enclosing" used by this function considers +// additional whitespace abutting a node to be enclosed by it. +// In this example: +// +// z := x + y // add them +// <-A-> +// <----B-----> +// +// the ast.BinaryExpr(+) node is considered to enclose interval B +// even though its [Pos()..End()) is actually only interval A. +// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// Precondition: [start, end) both lie within the same file as root. +// TODO(adonovan): return (nil, false) in this case and remove precond. +// Requires FileSet; see loader.tokenFileContainsPos. +// +// Postcondition: path is never nil; it always contains at least 'root'. +// +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). 
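+		// A child "contains" [start, end) if its augmented interval,
+		// i.e. the child's own extent widened to include the whitespace
+		// separating it from its neighbors, covers the whole of
+		// [start, end); see the augPos/augEnd computation below.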
+		children := childrenOf(node)
+		l := len(children)
+		for i, child := range children {
+			// [childPos, childEnd) is unaugmented interval of child.
+			childPos := child.Pos()
+			childEnd := child.End()
+
+			// [augPos, augEnd) is whitespace-augmented interval of child.
+			augPos := childPos
+			augEnd := childEnd
+			if i > 0 {
+				augPos = children[i-1].End() // start of preceding whitespace
+			}
+			if i < l-1 {
+				nextChildPos := children[i+1].Pos()
+				// Does [start, end) lie between child and next child?
+				if start >= augEnd && end <= nextChildPos {
+					return false // inexact match
+				}
+				augEnd = nextChildPos // end of following whitespace
+			}
+
+			// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
+			//	i, augPos, augEnd, start, end) // debugging
+
+			// Does augmented child strictly contain [start, end)?
+			if augPos <= start && end <= augEnd {
+				_, isToken := child.(tokenNode)
+				return isToken || visit(child)
+			}
+
+			// Does [start, end) overlap multiple children?
+			// i.e. left-augmented child contains start
+			// but LR-augmented child does not contain end.
+			if start < childEnd && end > augEnd {
+				break
+			}
+		}
+
+		// No single child contained [start, end),
+		// so node is the result. Is it exact?
+
+		// (It's tempting to put this condition before the
+		// child loop, but it gives the wrong result in the
+		// case where a node (e.g. ExprStmt) and its sole
+		// child have equal intervals.)
+		if start == nodePos && end == nodeEnd {
+			return true // exact match
+		}
+
+		return false // inexact: overlaps multiple children
+	}
+
+	if start > end {
+		start, end = end, start
+	}
+
+	if start < root.End() && end > root.Pos() {
+		if start == end {
+			end = start + 1 // empty interval => interval of size 1
+		}
+		exact = visit(root)
+
+		// Reverse the path:
+		for i, l := 0, len(path); i < l/2; i++ {
+			path[i], path[l-1-i] = path[l-1-i], path[i]
+		}
+	} else {
+		// Selection lies within whitespace preceding the
+		// first (or following the last) declaration in the file.
+		// The result nonetheless always includes the ast.File.
+		path = append(path, root)
+	}
+
+	return
+}
+
+// tokenNode is a dummy implementation of ast.Node for a single token.
+// Such nodes are used transiently by PathEnclosingInterval but never
+// escape this package.
+//
+type tokenNode struct {
+	pos token.Pos
+	end token.Pos
+}
+
+func (n tokenNode) Pos() token.Pos {
+	return n.pos
+}
+
+func (n tokenNode) End() token.Pos {
+	return n.end
+}
+
+func tok(pos token.Pos, len int) ast.Node {
+	return tokenNode{pos, pos + token.Pos(len)}
+}
+
+// childrenOf returns the direct non-nil children of ast.Node n.
+// It may include fake ast.Node implementations for bare tokens.
+// It is not safe to call (e.g.) ast.Walk on such nodes.
+//
+func childrenOf(n ast.Node) []ast.Node {
+	var children []ast.Node
+
+	// First add nodes for all true subtrees.
+	ast.Inspect(n, func(node ast.Node) bool {
+		if node == n { // push n
+			return true // recur
+		}
+		if node != nil { // push child
+			children = append(children, node)
+		}
+		return false // no recursion
+	})
+
+	// Then add fake Nodes for bare tokens.
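+	// For example, for `x + y` a tokenNode spanning "+" is added, so
+	// that a selection of just the operator is attributed to the
+	// enclosing ast.BinaryExpr (see PathEnclosingInterval above).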
+	switch n := n.(type) {
+	case *ast.ArrayType:
+		children = append(children,
+			tok(n.Lbrack, len("[")),
+			tok(n.Elt.End(), len("]")))
+
+	case *ast.AssignStmt:
+		children = append(children,
+			tok(n.TokPos, len(n.Tok.String())))
+
+	case *ast.BasicLit:
+		children = append(children,
+			tok(n.ValuePos, len(n.Value)))
+
+	case *ast.BinaryExpr:
+		children = append(children, tok(n.OpPos, len(n.Op.String())))
+
+	case *ast.BlockStmt:
+		children = append(children,
+			tok(n.Lbrace, len("{")),
+			tok(n.Rbrace, len("}")))
+
+	case *ast.BranchStmt:
+		children = append(children,
+			tok(n.TokPos, len(n.Tok.String())))
+
+	case *ast.CallExpr:
+		children = append(children,
+			tok(n.Lparen, len("(")),
+			tok(n.Rparen, len(")")))
+		if n.Ellipsis != 0 {
+			children = append(children, tok(n.Ellipsis, len("...")))
+		}
+
+	case *ast.CaseClause:
+		if n.List == nil {
+			children = append(children,
+				tok(n.Case, len("default")))
+		} else {
+			children = append(children,
+				tok(n.Case, len("case")))
+		}
+		children = append(children, tok(n.Colon, len(":")))
+
+	case *ast.ChanType:
+		switch n.Dir {
+		case ast.RECV:
+			children = append(children, tok(n.Begin, len("<-chan")))
+		case ast.SEND:
+			children = append(children, tok(n.Begin, len("chan<-")))
+		case ast.RECV | ast.SEND:
+			children = append(children, tok(n.Begin, len("chan")))
+		}
+
+	case *ast.CommClause:
+		if n.Comm == nil {
+			children = append(children,
+				tok(n.Case, len("default")))
+		} else {
+			children = append(children,
+				tok(n.Case, len("case")))
+		}
+		children = append(children, tok(n.Colon, len(":")))
+
+	case *ast.Comment:
+		// nop
+
+	case *ast.CommentGroup:
+		// nop
+
+	case *ast.CompositeLit:
+		children = append(children,
+			tok(n.Lbrace, len("{")),
+			tok(n.Rbrace, len("}")))
+
+	case *ast.DeclStmt:
+		// nop
+
+	case *ast.DeferStmt:
+		children = append(children,
+			tok(n.Defer, len("defer")))
+
+	case *ast.Ellipsis:
+		children = append(children,
+			tok(n.Ellipsis, len("...")))
+
+	case *ast.EmptyStmt:
+		// nop
+
+	case *ast.ExprStmt:
+		// nop
+
+	case *ast.Field:
+		// TODO(adonovan): Field.{Doc,Comment,Tag}?
+
+	case *ast.FieldList:
+		children = append(children,
+			tok(n.Opening, len("(")),
+			tok(n.Closing, len(")")))
+
+	case *ast.File:
+		// TODO test: Doc
+		children = append(children,
+			tok(n.Package, len("package")))
+
+	case *ast.ForStmt:
+		children = append(children,
+			tok(n.For, len("for")))
+
+	case *ast.FuncDecl:
+		// TODO(adonovan): FuncDecl.Comment?
+
+		// Uniquely, FuncDecl breaks the invariant that
+		// preorder traversal yields tokens in lexical order:
+		// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
+		//
+		// As a workaround, we inline the case for FuncType
+		// here and order things correctly.
+		//
+		children = nil // discard ast.Walk(FuncDecl) info subtrees
+		children = append(children, tok(n.Type.Func, len("func")))
+		if n.Recv != nil {
+			children = append(children, n.Recv)
+		}
+		children = append(children, n.Name)
+		if n.Type.Params != nil {
+			children = append(children, n.Type.Params)
+		}
+		if n.Type.Results != nil {
+			children = append(children, n.Type.Results)
+		}
+		if n.Body != nil {
+			children = append(children, n.Body)
+		}
+
+	case *ast.FuncLit:
+		// nop
+
+	case *ast.FuncType:
+		if n.Func != 0 {
+			children = append(children,
+				tok(n.Func, len("func")))
+		}
+
+	case *ast.GenDecl:
+		children = append(children,
+			tok(n.TokPos, len(n.Tok.String())))
+		if n.Lparen != 0 {
+			children = append(children,
+				tok(n.Lparen, len("(")),
+				tok(n.Rparen, len(")")))
+		}
+
+	case *ast.GoStmt:
+		children = append(children,
+			tok(n.Go, len("go")))
+
+	case *ast.Ident:
+		children = append(children,
+			tok(n.NamePos, len(n.Name)))
+
+	case *ast.IfStmt:
+		children = append(children,
+			tok(n.If, len("if")))
+
+	case *ast.ImportSpec:
+		// TODO(adonovan): ImportSpec.{Doc,EndPos}?
+
+	case *ast.IncDecStmt:
+		children = append(children,
+			tok(n.TokPos, len(n.Tok.String())))
+
+	case *ast.IndexExpr:
+		children = append(children,
+			tok(n.Lbrack, len("[")),
+			tok(n.Rbrack, len("]")))
+
+	case *ast.InterfaceType:
+		children = append(children,
+			tok(n.Interface, len("interface")))
+
+	case *ast.KeyValueExpr:
+		children = append(children,
+			tok(n.Colon, len(":")))
+
+	case *ast.LabeledStmt:
+		children = append(children,
+			tok(n.Colon, len(":")))
+
+	case *ast.MapType:
+		children = append(children,
+			tok(n.Map, len("map")))
+
+	case *ast.ParenExpr:
+		children = append(children,
+			tok(n.Lparen, len("(")),
+			tok(n.Rparen, len(")")))
+
+	case *ast.RangeStmt:
+		children = append(children,
+			tok(n.For, len("for")),
+			tok(n.TokPos, len(n.Tok.String())))
+
+	case *ast.ReturnStmt:
+		children = append(children,
+			tok(n.Return, len("return")))
+
+	case *ast.SelectStmt:
+		children = append(children,
+			tok(n.Select, len("select")))
+
+	case *ast.SelectorExpr:
+		// nop
+
+	case *ast.SendStmt:
+		children = append(children,
+			tok(n.Arrow, len("<-")))
+
+	case *ast.SliceExpr:
+		children = append(children,
+			tok(n.Lbrack, len("[")),
+			tok(n.Rbrack, len("]")))
+
+	case *ast.StarExpr:
+		children = append(children, tok(n.Star, len("*")))
+
+	case *ast.StructType:
+		children = append(children, tok(n.Struct, len("struct")))
+
+	case *ast.SwitchStmt:
+		children = append(children, tok(n.Switch, len("switch")))
+
+	case *ast.TypeAssertExpr:
+		children = append(children,
+			tok(n.Lparen-1, len(".")),
+			tok(n.Lparen, len("(")),
+			tok(n.Rparen, len(")")))
+
+	case *ast.TypeSpec:
+		// TODO(adonovan): TypeSpec.{Doc,Comment}?
+
+	case *ast.TypeSwitchStmt:
+		children = append(children, tok(n.Switch, len("switch")))
+
+	case *ast.UnaryExpr:
+		children = append(children, tok(n.OpPos, len(n.Op.String())))
+
+	case *ast.ValueSpec:
+		// TODO(adonovan): ValueSpec.{Doc,Comment}?
+
+	case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
+		// nop
+	}
+
+	// TODO(adonovan): opt: merge the logic of ast.Inspect() into
+	// the switch above so we can make interleaved callbacks for
+	// both Nodes and Tokens in the right order and avoid the need
+	// to sort.
+ sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. +// +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return 
"return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case *ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 000000000..3e4b19536 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,481 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. +func AddImport(fset *token.FileSet, f *ast.File, path string) (added bool) { + return AddNamedImport(fset, f, "", path) +} + +// AddNamedImport adds the import with the given name and path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// AddNamedImport(fset, f, "pathpkg", "path") +// adds +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, path string) (added bool) { + if imports(f, name, path) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(path), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with path. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(path) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. 
+ seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, path) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import, preceded by a blank line, goes after the package declaration + // and after the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + // +2 for a blank line + impDecl.TokPos = c.End() + 2 + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. + insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. 
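+//
+// A minimal usage sketch (the file name and src are illustrative):
+//
+//	fset := token.NewFileSet()
+//	f, err := parser.ParseFile(fset, "main.go", src, parser.ParseComments)
+//	if err != nil {
+//		// ...
+//	}
+//	astutil.DeleteImport(fset, f, "fmt")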
+func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +// If there are duplicate import declarations, all matching ones are deleted. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if importName(impspec) != name || importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. + if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.Position(lastImpspec.Path.ValuePos).Line + line := fset.Position(impspec.Path.ValuePos).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. 
+ for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. + return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports reports whether f has an import with the specified name and path. +func imports(f *ast.File, name, path string) bool { + for _, s := range f.Imports { + if importName(s) == name && importPath(s) == path { + return true + } + } + return false +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importName returns the name of s, +// or "" if the import is not named. +func importName(s *ast.ImportSpec) string { + if s.Name == nil { + return "" + } + return s.Name.Name +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. 
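+//
+// For example, for a file whose import block is written as
+//
+//	import (
+//		"fmt"
+//
+//		"golang.org/x/tools/go/ast/astutil"
+//	)
+//
+// Imports returns two groups of one spec each, split at the blank line.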
+func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 000000000..cf72ea990 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,477 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +// +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. 
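+//
+// An illustrative sketch of an ApplyFunc reading the cursor (the
+// identifier names are assumed):
+//
+//	astutil.Apply(f, func(c *astutil.Cursor) bool {
+//		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "old" {
+//			c.Replace(ast.NewIdent("new"))
+//		}
+//		return true
+//	}, nil)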
+func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. +func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. 
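+// A single Cursor and iterator are reused for the entire traversal (see
+// apply and applyList below), so visiting n nodes does not allocate n
+// cursors.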
+type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + 
a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. + + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. 
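+// step is normally 1; Cursor.Delete decrements it so that the element
+// shifted into the current index is not skipped, and Cursor.InsertAfter
+// increments it so that the newly inserted node is not walked.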
+type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 000000000..763062982 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,14 @@ +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 000000000..f8363d8fa --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,109 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +// +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "fmt" + "go/token" + "go/types" + "io" + "io/ioutil" + + "golang.org/x/tools/go/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the workspace layout conventions of go/build. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +func Find(importPath, srcDir string) (filename, path string) { + return gcimporter.FindPkg(importPath, srcDir) +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. 
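+//
+// A minimal sketch of use (the archive path is illustrative):
+//
+//	f, err := os.Open("fmt.a")
+//	if err != nil {
+//		// ...
+//	}
+//	r, err := gcexportdata.NewReader(f)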
+func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, err := gcimporter.FindExportData(buf) + // If we ever switch to a zip-like archive format with the ToC + // at the end, we can return the correct portion of export data, + // but for now we must return the entire rest of the file. + return buf, err +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// The package name is specified by path. +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := ioutil.ReadAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The App Engine Go runtime v1.6 uses the old export data format. + // TODO(adonovan): delete once v1.7 has been around for a while. + if bytes.HasPrefix(data, []byte("package ")) { + return gcimporter.ImportData(imports, path, path, bytes.NewReader(data)) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + } + + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + b, err := gcimporter.IExportData(fset, pkg) + if err != nil { + return err + } + _, err = out.Write(b) + return err +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 000000000..efe221e7e --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,73 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. 
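+//
+// A minimal sketch of use (the import path is illustrative):
+//
+//	fset := token.NewFileSet()
+//	imports := make(map[string]*types.Package)
+//	imp := gcexportdata.NewImporter(fset, imports)
+//	pkg, err := imp.Import("fmt")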
+// +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go new file mode 100644 index 000000000..a807d0aaa --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/ast" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// If trace is set, debugging output is printed to std out. +const trace = false // default: false + +// Current export format version. Increase with each format change. +// Note: The latest binary (non-indexed) export format is at version 6. +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. 
+// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
+// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
+// 4: type name objects support type aliases, uses aliasTag
+// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
+// 2: removed unused bool in ODCL export (compiler only)
+// 1: header format change (more regular), export package for _ struct fields
+// 0: Go1.7 encoding
+const exportVersion = 4
+
+// trackAllTypes enables cycle tracking for all types, not just named
+// types. The existing compiler invariants assume that unnamed types
+// that are not completely set up are not used, or else there are spurious
+// errors.
+// If disabled, only named types are tracked, possibly leading to slightly
+// less efficient encoding in rare cases. It also prevents the export of
+// some corner-case type declarations (but those are not handled correctly
+// with the textual export format either).
+// TODO(gri) enable and remove once issues caused by it are fixed
+const trackAllTypes = false
+
+type exporter struct {
+	fset *token.FileSet
+	out  bytes.Buffer
+
+	// object -> index maps, indexed in order of serialization
+	strIndex map[string]int
+	pkgIndex map[*types.Package]int
+	typIndex map[types.Type]int
+
+	// position encoding
+	posInfoFormat bool
+	prevFile      string
+	prevLine      int
+
+	// debugging support
+	written int // bytes written
+	indent  int // for trace
+}
+
+// internalError represents an error generated inside this package.
+type internalError string
+
+func (e internalError) Error() string { return "gcimporter: " + string(e) }
+
+func internalErrorf(format string, args ...interface{}) error {
+	return internalError(fmt.Sprintf(format, args...))
+}
+
+// BExportData returns binary export data for pkg.
+// If no file set is provided, position info will be missing.
+func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			if ierr, ok := e.(internalError); ok {
+				err = ierr
+				return
+			}
+			// Not an internal error; panic again.
+			panic(e)
+		}
+	}()
+
+	p := exporter{
+		fset:          fset,
+		strIndex:      map[string]int{"": 0}, // empty string is mapped to 0
+		pkgIndex:      make(map[*types.Package]int),
+		typIndex:      make(map[types.Type]int),
+		posInfoFormat: true, // TODO(gri) might become a flag, eventually
+	}
+
+	// write version info
+	// The version string must start with "version %d" where %d is the version
+	// number. Additional debugging information may follow after a blank; that
+	// text is ignored by the importer.
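+	// For example, with the constants defined above, the header written
+	// below looks like this on the wire (an illustrative sketch):
+	//
+	//	version 4\n
+	//	\n            <- empty debug line (debugFormat is false)
+	//	varint(0)     <- trackAllTypes
+	//	varint(1)     <- posInfoFormat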
+ p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !ast.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". 
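+		// For example (illustrative): after emitting "/go/src/foo/a.go",
+		// the file "/go/src/foo/b.go" is encoded as the triple
+		// (0, 12, "b.go") followed by the absolute line number:
+		// 0 marks a file change, 12 is the length of the shared
+		// prefix "/go/src/foo/", and "b.go" is the differing suffix.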
+ n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. + + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). 
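+	// (The binary format carries no inherent method order; sorting by
+	// name below makes the emitted data deterministic across builds,
+	// e.g. methods declared in the order C, A, B are always written
+	// out as A, B, C.)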
+ var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. + p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !ast.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if ast.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" 
to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !ast.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. 
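+	// For example, the value 258 (0x0102) comes back from
+	// constant.Bytes as the little-endian slice [0x02, 0x01] and must
+	// be reversed to [0x01, 0x02] before big.Int.SetBytes below, which
+	// expects big-endian bytes.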
+ bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. + if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. +// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". 
") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. +// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go new file mode 100644 index 000000000..e9f73d14a --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/bimport.go @@ -0,0 +1,1039 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). + // For backward-compatibility only (avoid problems with + // old installed packages). 
Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... + // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) + sort.Sort(byPath(list)) + pkg.SetImports(list) + + // package was imported completely and without errors + pkg.MarkComplete() + + return p.read, pkg, nil +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +func (p *importer) pkg() *types.Package { + // if the package was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.pkgList[i] + } + + // otherwise, i is the package tag (< 0) + if i != packageTag { + errorf("unexpected package tag %d version %d", i, p.version) + } + + // read package data + name := p.string() + var path string + if p.version >= 5 { + path = p.path() + } else { + path = p.string() + } + if p.version >= 6 { + p.int() // package height; unused by go/types + } + + // we should never see an empty package name + if name == "" { + errorf("empty package name in import") + } + + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + errorf("package path %q for pkg index %d", path, len(p.pkgList)) + } + + // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.importpath + } + pkg := p.imports[path] + if pkg == nil { + pkg = types.NewPackage(path, name) + p.imports[path] = pkg + } else if pkg.Name() != name { + errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) + } + p.pkgList = append(p.pkgList, pkg) + + return pkg +} + +// objTag returns the tag value for each object kind. 
+func objTag(obj types.Object) int {
+	switch obj.(type) {
+	case *types.Const:
+		return constTag
+	case *types.TypeName:
+		return typeTag
+	case *types.Var:
+		return varTag
+	case *types.Func:
+		return funcTag
+	default:
+		errorf("unexpected object: %v (%T)", obj, obj) // panics
+		panic("unreachable")
+	}
+}
+
+func sameObj(a, b types.Object) bool {
+	// Because unnamed types are not canonicalized, we cannot simply compare types for
+	// (pointer) identity.
+	// Ideally we'd check equality of constant values as well, but this is good enough.
+	return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type())
+}
+
+func (p *importer) declare(obj types.Object) {
+	pkg := obj.Pkg()
+	if alt := pkg.Scope().Insert(obj); alt != nil {
+		// This can only trigger if we import a (non-type) object a second time.
+		// Excluding type aliases, this cannot happen because 1) we only import a package
+		// once; and 2) we ignore compiler-specific export data which may contain
+		// functions whose inlined function bodies refer to other functions that
+		// were already imported.
+		// However, type aliases require reexporting the original type, so we need
+		// to allow it (see also the comment in cmd/compile/internal/gc/bimport.go,
+		// method importer.obj, switch case importing functions).
+		// TODO(gri) review/update this comment once the gc compiler handles type aliases.
+		if !sameObj(obj, alt) {
+			errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt)
+		}
+	}
+}
+
+func (p *importer) obj(tag int) {
+	switch tag {
+	case constTag:
+		pos := p.pos()
+		pkg, name := p.qualifiedName()
+		typ := p.typ(nil, nil)
+		val := p.value()
+		p.declare(types.NewConst(pos, pkg, name, typ, val))
+
+	case aliasTag:
+		// TODO(gri) verify type alias hookup is correct
+		pos := p.pos()
+		pkg, name := p.qualifiedName()
+		typ := p.typ(nil, nil)
+		p.declare(types.NewTypeName(pos, pkg, name, typ))
+
+	case typeTag:
+		p.typ(nil, nil)
+
+	case varTag:
+		pos := p.pos()
+		pkg, name := p.qualifiedName()
+		typ := p.typ(nil, nil)
+		p.declare(types.NewVar(pos, pkg, name, typ))
+
+	case funcTag:
+		pos := p.pos()
+		pkg, name := p.qualifiedName()
+		params, isddd := p.paramList()
+		result, _ := p.paramList()
+		sig := types.NewSignature(nil, params, result, isddd)
+		p.declare(types.NewFunc(pos, pkg, name, sig))
+
+	default:
+		errorf("unexpected object tag %d", tag)
+	}
+}
+
+const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
+
+func (p *importer) pos() token.Pos {
+	if !p.posInfoFormat {
+		return token.NoPos
+	}
+
+	file := p.prevFile
+	line := p.prevLine
+	delta := p.int()
+	line += delta
+	if p.version >= 5 {
+		if delta == deltaNewFile {
+			if n := p.int(); n >= 0 {
+				// file changed
+				file = p.path()
+				line = n
+			}
+		}
+	} else {
+		if delta == 0 {
+			if n := p.int(); n >= 0 {
+				// file changed
+				file = p.prevFile[:n] + p.string()
+				line = p.int()
+			}
+		}
+	}
+	p.prevFile = file
+	p.prevLine = line
+
+	return p.fake.pos(file, line, 0)
+}
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+	fset  *token.FileSet
+	files map[string]*token.File
+}
+
+func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
+	// TODO(mdempsky): Make use of column.
+
+	// Since we don't know the set of needed file positions, we
+	// reserve maxlines positions per file.
+	const maxlines = 64 * 1024
+	f := s.files[file]
+	if f == nil {
+		f = s.fset.AddFile(file, -1, maxlines)
+		s.files[file] = f
+		// Allocate the fake linebreak indices on first use.
+ // TODO(adonovan): opt: save ~512KB using a more complex scheme? + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + f.SetLines(fakeLines) + } + + if line > maxlines { + line = 1 + } + + // Treat the file as if it contained only newlines + // and column=1: use the line number as the offset. + return f.Pos(line - 1) +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. +func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? 
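+			// (The receiver tuple read above always carries exactly one
+			// variable; recv.At(0) below pulls it out for the signature.)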
+ params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. + n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = true + } else if alias { + // 
anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. + var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := 
[]byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent (constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. 
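+//
+// For example (illustrative), the escaped input
+//
+//	'|' 'S' 'x' '|' '|'
+//
+// decodes to the three bytes
+//
+//	'$' 'x' '|'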
+func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. +const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go new file mode 100644 index 000000000..f33dc5613 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/exportdata.go @@ -0,0 +1,93 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int, err error) { + // See $GOROOT/include/ar.h. 
+ hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + size, err = strconv.Atoi(s) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. The reader must be positioned at the +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// +func FindExportData(r *bufio.Reader) (hdr string, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, _, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + } + hdr = string(line) + + return +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go new file mode 100644 index 000000000..9cf186605 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -0,0 +1,1078 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a modified copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go, +// but it also contains the original source-based importer code for Go1.6. +// Once we stop supporting 1.6, we can remove that code. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +package gcimporter // import "golang.org/x/tools/go/internal/gcimporter" + +import ( + "bufio" + "errors" + "fmt" + "go/build" + "go/constant" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "text/scanner" +) + +// debugging/development support +const debug = false + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. 
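+// For example (an illustrative sketch; assumes GOPATH=/home/me/go,
+// linux/amd64, and that the compiled .a files exist):
+//
+//	FindPkg("foo/bar", "")
+//	  = "/home/me/go/pkg/linux_amd64/foo/bar.a", "foo/bar"
+//	FindPkg("./bar", "/home/me/go/src/foo")
+//	  = "/home/me/go/src/foo/bar.a", "/home/me/go/src/foo/bar"
+//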
+// If no file was found, an empty filename is returned.
+//
+func FindPkg(path, srcDir string) (filename, id string) {
+	if path == "" {
+		return
+	}
+
+	var noext string
+	switch {
+	default:
+		// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+		// Don't require the source files to be present.
+		if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+			srcDir = abs
+		}
+		bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+		if bp.PkgObj == "" {
+			id = path // make sure we have an id to print in error message
+			return
+		}
+		noext = strings.TrimSuffix(bp.PkgObj, ".a")
+		id = bp.ImportPath
+
+	case build.IsLocalImport(path):
+		// "./x" -> "/this/directory/x.ext", "/this/directory/x"
+		noext = filepath.Join(srcDir, path)
+		id = noext
+
+	case filepath.IsAbs(path):
+		// for completeness only - go/build.Import
+		// does not support absolute imports
+		// "/x" -> "/x.ext", "/x"
+		noext = path
+		id = path
+	}
+
+	if false { // for debugging
+		if path != id {
+			fmt.Printf("%s -> %s\n", path, id)
+		}
+	}
+
+	// try extensions
+	for _, ext := range pkgExts {
+		filename = noext + ext
+		if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+			return
+		}
+	}
+
+	filename = "" // not found
+	return
+}
+
+// ImportData imports a package by reading the gc-generated export data,
+// adds the corresponding package object to the packages map indexed by id,
+// and returns the object.
+//
+// The packages map must contain all packages already imported. The data
+// reader position must be the beginning of the export data section. The
+// filename is only used in error messages.
+//
+// If packages[id] contains the completely imported package, that package
+// can be used directly, and there is no need to call this function (but
+// there is also no harm apart from the extra time used).
+//
+func ImportData(packages map[string]*types.Package, filename, id string, data io.Reader) (pkg *types.Package, err error) {
+	// support for parser error handling
+	defer func() {
+		switch r := recover().(type) {
+		case nil:
+			// nothing to do
+		case importError:
+			err = r
+		default:
+			panic(r) // internal error
+		}
+	}()
+
+	var p parser
+	p.init(filename, id, data, packages)
+	pkg = p.parseExport()
+
+	return
+}
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
+//
+func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+	var rc io.ReadCloser
+	var filename, id string
+	if lookup != nil {
+		// With custom lookup specified, assume that caller has
+		// converted path to a canonical import path for use in the map.
+		if path == "unsafe" {
+			return types.Unsafe, nil
+		}
+		id = path
+
+		// No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + buf := bufio.NewReader(rc) + if hdr, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$\n": + // Work-around if we don't have a filename; happens only if lookup != nil. + // Either way, the filename is only needed for importer error messages, so + // this is fine. + if filename == "" { + filename = path + } + return ImportData(packages, filename, id, buf) + + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 && data[0] == 'i' { + _, pkg, err = IImportData(fset, packages, data[1:], id) + } else { + _, pkg, err = BImportData(fset, packages, data, id) + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +// ---------------------------------------------------------------------------- +// Parser + +// TODO(gri) Imported objects don't have position information. +// Ideally use the debug table line info; alternatively +// create some fake position (or the position of the +// import). That way error messages referring to imported +// objects can print meaningful information. + +// parser parses the exports inside a gc compiler-produced +// object/archive file and populates its scope with the results. 
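+// A parser is not used directly; ImportData above drives it. An
+// illustrative call (usable within x/tools only, since this package is
+// internal; "fmt.a" and dataReader are hypothetical):
+//
+//	packages := make(map[string]*types.Package)
+//	pkg, err := gcimporter.ImportData(packages, "fmt.a", "fmt", dataReader)
+//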
+type parser struct { + scanner scanner.Scanner + tok rune // current token + lit string // literal string; only valid for Ident, Int, String tokens + id string // package id of imported package + sharedPkgs map[string]*types.Package // package id -> package object (across importer) + localPkgs map[string]*types.Package // package id -> package object (just this package) +} + +func (p *parser) init(filename, id string, src io.Reader, packages map[string]*types.Package) { + p.scanner.Init(src) + p.scanner.Error = func(_ *scanner.Scanner, msg string) { p.error(msg) } + p.scanner.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanChars | scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments + p.scanner.Whitespace = 1<<'\t' | 1<<' ' + p.scanner.Filename = filename // for good error messages + p.next() + p.id = id + p.sharedPkgs = packages + if debug { + // check consistency of packages map + for _, pkg := range packages { + if pkg.Name() == "" { + fmt.Printf("no package name for %s\n", pkg.Path()) + } + } + } +} + +func (p *parser) next() { + p.tok = p.scanner.Scan() + switch p.tok { + case scanner.Ident, scanner.Int, scanner.Char, scanner.String, '·': + p.lit = p.scanner.TokenText() + default: + p.lit = "" + } + if debug { + fmt.Printf("%s: %q -> %q\n", scanner.TokenString(p.tok), p.scanner.TokenText(), p.lit) + } +} + +func declTypeName(pkg *types.Package, name string) *types.TypeName { + scope := pkg.Scope() + if obj := scope.Lookup(name); obj != nil { + return obj.(*types.TypeName) + } + obj := types.NewTypeName(token.NoPos, pkg, name, nil) + // a named type may be referred to before the underlying type + // is known - set it up + types.NewNamed(obj, nil, nil) + scope.Insert(obj) + return obj +} + +// ---------------------------------------------------------------------------- +// Error handling + +// Internal errors are boxed as importErrors. +type importError struct { + pos scanner.Position + err error +} + +func (e importError) Error() string { + return fmt.Sprintf("import error %s (byte offset = %d): %s", e.pos, e.pos.Offset, e.err) +} + +func (p *parser) error(err interface{}) { + if s, ok := err.(string); ok { + err = errors.New(s) + } + // panic with a runtime.Error if err is not an error + panic(importError{p.scanner.Pos(), err.(error)}) +} + +func (p *parser) errorf(format string, args ...interface{}) { + p.error(fmt.Sprintf(format, args...)) +} + +func (p *parser) expect(tok rune) string { + lit := p.lit + if p.tok != tok { + p.errorf("expected %s, got %s (%s)", scanner.TokenString(tok), scanner.TokenString(p.tok), lit) + } + p.next() + return lit +} + +func (p *parser) expectSpecial(tok string) { + sep := 'x' // not white space + i := 0 + for i < len(tok) && p.tok == rune(tok[i]) && sep > ' ' { + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + i++ + } + if i < len(tok) { + p.errorf("expected %q, got %q", tok, tok[0:i]) + } +} + +func (p *parser) expectKeyword(keyword string) { + lit := p.expect(scanner.Ident) + if lit != keyword { + p.errorf("expected keyword %s, got %q", keyword, lit) + } +} + +// ---------------------------------------------------------------------------- +// Qualified and unqualified names + +// PackageId = string_lit . 
+// +func (p *parser) parsePackageId() string { + id, err := strconv.Unquote(p.expect(scanner.String)) + if err != nil { + p.error(err) + } + // id == "" stands for the imported package id + // (only known at time of package installation) + if id == "" { + id = p.id + } + return id +} + +// PackageName = ident . +// +func (p *parser) parsePackageName() string { + return p.expect(scanner.Ident) +} + +// dotIdentifier = ( ident | '·' ) { ident | int | '·' } . +func (p *parser) parseDotIdent() string { + ident := "" + if p.tok != scanner.Int { + sep := 'x' // not white space + for (p.tok == scanner.Ident || p.tok == scanner.Int || p.tok == '·') && sep > ' ' { + ident += p.lit + sep = p.scanner.Peek() // if sep <= ' ', there is white space before the next token + p.next() + } + } + if ident == "" { + p.expect(scanner.Ident) // use expect() for error handling + } + return ident +} + +// QualifiedName = "@" PackageId "." ( "?" | dotIdentifier ) . +// +func (p *parser) parseQualifiedName() (id, name string) { + p.expect('@') + id = p.parsePackageId() + p.expect('.') + // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. + if p.tok == '?' { + p.next() + } else { + name = p.parseDotIdent() + } + return +} + +// getPkg returns the package for a given id. If the package is +// not found, create the package and add it to the p.localPkgs +// and p.sharedPkgs maps. name is the (expected) name of the +// package. If name == "", the package name is expected to be +// set later via an import clause in the export data. +// +// id identifies a package, usually by a canonical package path like +// "encoding/json" but possibly by a non-canonical import path like +// "./json". +// +func (p *parser) getPkg(id, name string) *types.Package { + // package unsafe is not in the packages maps - handle explicitly + if id == "unsafe" { + return types.Unsafe + } + + pkg := p.localPkgs[id] + if pkg == nil { + // first import of id from this package + pkg = p.sharedPkgs[id] + if pkg == nil { + // first import of id by this importer; + // add (possibly unnamed) pkg to shared packages + pkg = types.NewPackage(id, name) + p.sharedPkgs[id] = pkg + } + // add (possibly unnamed) pkg to local packages + if p.localPkgs == nil { + p.localPkgs = make(map[string]*types.Package) + } + p.localPkgs[id] = pkg + } else if name != "" { + // package exists already and we have an expected package name; + // make sure names match or set package name if necessary + if pname := pkg.Name(); pname == "" { + pkg.SetName(name) + } else if pname != name { + p.errorf("%s package name mismatch: %s (given) vs %s (expected)", id, pname, name) + } + } + return pkg +} + +// parseExportedName is like parseQualifiedName, but +// the package id is resolved to an imported *types.Package. +// +func (p *parser) parseExportedName() (pkg *types.Package, name string) { + id, name := p.parseQualifiedName() + pkg = p.getPkg(id, "") + return +} + +// ---------------------------------------------------------------------------- +// Types + +// BasicType = identifier . +// +func (p *parser) parseBasicType() types.Type { + id := p.expect(scanner.Ident) + obj := types.Universe.Lookup(id) + if obj, ok := obj.(*types.TypeName); ok { + return obj.Type() + } + p.errorf("not a basic type: %s", id) + return nil +} + +// ArrayType = "[" int_lit "]" Type . 
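+// For example (illustrative), the fragment [32]byte denotes a 32-element
+// byte array.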
+//
+func (p *parser) parseArrayType(parent *types.Package) types.Type {
+	// "[" already consumed and lookahead known not to be "]"
+	lit := p.expect(scanner.Int)
+	p.expect(']')
+	elem := p.parseType(parent)
+	n, err := strconv.ParseInt(lit, 10, 64)
+	if err != nil {
+		p.error(err)
+	}
+	return types.NewArray(elem, n)
+}
+
+// MapType = "map" "[" Type "]" Type .
+//
+func (p *parser) parseMapType(parent *types.Package) types.Type {
+	p.expectKeyword("map")
+	p.expect('[')
+	key := p.parseType(parent)
+	p.expect(']')
+	elem := p.parseType(parent)
+	return types.NewMap(key, elem)
+}
+
+// Name = identifier | "?" | QualifiedName .
+//
+// For unqualified and anonymous names, the returned package is the parent
+// package unless parent == nil, in which case the returned package is the
+// package being imported. (The parent package is not nil if the name
+// is an unqualified struct field or interface method name belonging to a
+// type declared in another package.)
+//
+// For qualified names, the returned package is nil (and not created if
+// it doesn't exist yet) unless materializePkg is set (which creates an
+// unnamed package with valid package path). In the latter case, a
+// subsequent import clause is expected to provide a name for the package.
+//
+func (p *parser) parseName(parent *types.Package, materializePkg bool) (pkg *types.Package, name string) {
+	pkg = parent
+	if pkg == nil {
+		pkg = p.sharedPkgs[p.id]
+	}
+	switch p.tok {
+	case scanner.Ident:
+		name = p.lit
+		p.next()
+	case '?':
+		// anonymous
+		p.next()
+	case '@':
+		// exported name prefixed with package path
+		pkg = nil
+		var id string
+		id, name = p.parseQualifiedName()
+		if materializePkg {
+			pkg = p.getPkg(id, "")
+		}
+	default:
+		p.error("name expected")
+	}
+	return
+}
+
+func deref(typ types.Type) types.Type {
+	if p, _ := typ.(*types.Pointer); p != nil {
+		return p.Elem()
+	}
+	return typ
+}
+
+// Field = Name Type [ string_lit ] .
+//
+func (p *parser) parseField(parent *types.Package) (*types.Var, string) {
+	pkg, name := p.parseName(parent, true)
+
+	if name == "_" {
+		// Blank fields should be package-qualified because they
+		// are unexported identifiers, but gc does not qualify them.
+		// Assuming that the ident belongs to the current package
+		// causes types to change during re-exporting, leading
+		// to spurious "can't assign A to B" errors from go/types.
+		// As a workaround, pretend all blank fields belong
+		// to the same unique dummy package.
+		const blankpkg = "<_>"
+		pkg = p.getPkg(blankpkg, blankpkg)
+	}
+
+	typ := p.parseType(parent)
+	anonymous := false
+	if name == "" {
+		// anonymous field - typ must be T or *T and T must be a type name
+		switch typ := deref(typ).(type) {
+		case *types.Basic: // basic types are named types
+			pkg = nil // objects defined in Universe scope have no package
+			name = typ.Name()
+		case *types.Named:
+			name = typ.Obj().Name()
+		default:
+			p.errorf("anonymous field expected")
+		}
+		anonymous = true
+	}
+	tag := ""
+	if p.tok == scanner.String {
+		s := p.expect(scanner.String)
+		var err error
+		tag, err = strconv.Unquote(s)
+		if err != nil {
+			p.errorf("invalid struct tag %s: %s", s, err)
+		}
+	}
+	return types.NewField(token.NoPos, pkg, name, typ, anonymous), tag
+}
+
+// StructType = "struct" "{" [ FieldList ] "}" .
+// FieldList = Field { ";" Field } .
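+// For example (illustrative), the fragment
+//
+//	struct { Name string "json:\"name\""; ? @"io".Reader }
+//
+// describes one tagged field and one anonymous (embedded) field.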
+// +func (p *parser) parseStructType(parent *types.Package) types.Type { + var fields []*types.Var + var tags []string + + p.expectKeyword("struct") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + fld, tag := p.parseField(parent) + if tag != "" && tags == nil { + tags = make([]string, i) + } + if tags != nil { + tags = append(tags, tag) + } + fields = append(fields, fld) + } + p.expect('}') + + return types.NewStruct(fields, tags) +} + +// Parameter = ( identifier | "?" ) [ "..." ] Type [ string_lit ] . +// +func (p *parser) parseParameter() (par *types.Var, isVariadic bool) { + _, name := p.parseName(nil, false) + // remove gc-specific parameter numbering + if i := strings.Index(name, "·"); i >= 0 { + name = name[:i] + } + if p.tok == '.' { + p.expectSpecial("...") + isVariadic = true + } + typ := p.parseType(nil) + if isVariadic { + typ = types.NewSlice(typ) + } + // ignore argument tag (e.g. "noescape") + if p.tok == scanner.String { + p.next() + } + // TODO(gri) should we provide a package? + par = types.NewVar(token.NoPos, nil, name, typ) + return +} + +// Parameters = "(" [ ParameterList ] ")" . +// ParameterList = { Parameter "," } Parameter . +// +func (p *parser) parseParameters() (list []*types.Var, isVariadic bool) { + p.expect('(') + for p.tok != ')' && p.tok != scanner.EOF { + if len(list) > 0 { + p.expect(',') + } + par, variadic := p.parseParameter() + list = append(list, par) + if variadic { + if isVariadic { + p.error("... not on final argument") + } + isVariadic = true + } + } + p.expect(')') + + return +} + +// Signature = Parameters [ Result ] . +// Result = Type | Parameters . +// +func (p *parser) parseSignature(recv *types.Var) *types.Signature { + params, isVariadic := p.parseParameters() + + // optional result type + var results []*types.Var + if p.tok == '(' { + var variadic bool + results, variadic = p.parseParameters() + if variadic { + p.error("... not permitted on result type") + } + } + + return types.NewSignature(recv, types.NewTuple(params...), types.NewTuple(results...), isVariadic) +} + +// InterfaceType = "interface" "{" [ MethodList ] "}" . +// MethodList = Method { ";" Method } . +// Method = Name Signature . +// +// The methods of embedded interfaces are always "inlined" +// by the compiler and thus embedded interfaces are never +// visible in the export data. +// +func (p *parser) parseInterfaceType(parent *types.Package) types.Type { + var methods []*types.Func + + p.expectKeyword("interface") + p.expect('{') + for i := 0; p.tok != '}' && p.tok != scanner.EOF; i++ { + if i > 0 { + p.expect(';') + } + pkg, name := p.parseName(parent, true) + sig := p.parseSignature(nil) + methods = append(methods, types.NewFunc(token.NoPos, pkg, name, sig)) + } + p.expect('}') + + // Complete requires the type's embedded interfaces to be fully defined, + // but we do not define any + return types.NewInterface(methods, nil).Complete() +} + +// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . 
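+// For example (illustrative): chan int, chan<- int, and <-chan int
+// denote bidirectional, send-only, and receive-only channels of int.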
+// +func (p *parser) parseChanType(parent *types.Package) types.Type { + dir := types.SendRecv + if p.tok == scanner.Ident { + p.expectKeyword("chan") + if p.tok == '<' { + p.expectSpecial("<-") + dir = types.SendOnly + } + } else { + p.expectSpecial("<-") + p.expectKeyword("chan") + dir = types.RecvOnly + } + elem := p.parseType(parent) + return types.NewChan(dir, elem) +} + +// Type = +// BasicType | TypeName | ArrayType | SliceType | StructType | +// PointerType | FuncType | InterfaceType | MapType | ChanType | +// "(" Type ")" . +// +// BasicType = ident . +// TypeName = ExportedName . +// SliceType = "[" "]" Type . +// PointerType = "*" Type . +// FuncType = "func" Signature . +// +func (p *parser) parseType(parent *types.Package) types.Type { + switch p.tok { + case scanner.Ident: + switch p.lit { + default: + return p.parseBasicType() + case "struct": + return p.parseStructType(parent) + case "func": + // FuncType + p.next() + return p.parseSignature(nil) + case "interface": + return p.parseInterfaceType(parent) + case "map": + return p.parseMapType(parent) + case "chan": + return p.parseChanType(parent) + } + case '@': + // TypeName + pkg, name := p.parseExportedName() + return declTypeName(pkg, name).Type() + case '[': + p.next() // look ahead + if p.tok == ']' { + // SliceType + p.next() + return types.NewSlice(p.parseType(parent)) + } + return p.parseArrayType(parent) + case '*': + // PointerType + p.next() + return types.NewPointer(p.parseType(parent)) + case '<': + return p.parseChanType(parent) + case '(': + // "(" Type ")" + p.next() + typ := p.parseType(parent) + p.expect(')') + return typ + } + p.errorf("expected type, got %s (%q)", scanner.TokenString(p.tok), p.lit) + return nil +} + +// ---------------------------------------------------------------------------- +// Declarations + +// ImportDecl = "import" PackageName PackageId . +// +func (p *parser) parseImportDecl() { + p.expectKeyword("import") + name := p.parsePackageName() + p.getPkg(p.parsePackageId(), name) +} + +// int_lit = [ "+" | "-" ] { "0" ... "9" } . +// +func (p *parser) parseInt() string { + s := "" + switch p.tok { + case '-': + s = "-" + p.next() + case '+': + p.next() + } + return s + p.expect(scanner.Int) +} + +// number = int_lit [ "p" int_lit ] . +// +func (p *parser) parseNumber() (typ *types.Basic, val constant.Value) { + // mantissa + mant := constant.MakeFromLiteral(p.parseInt(), token.INT, 0) + if mant == nil { + panic("invalid mantissa") + } + + if p.lit == "p" { + // exponent (base 2) + p.next() + exp, err := strconv.ParseInt(p.parseInt(), 10, 0) + if err != nil { + p.error(err) + } + if exp < 0 { + denom := constant.MakeInt64(1) + denom = constant.Shift(denom, token.SHL, uint(-exp)) + typ = types.Typ[types.UntypedFloat] + val = constant.BinaryOp(mant, token.QUO, denom) + return + } + if exp > 0 { + mant = constant.Shift(mant, token.SHL, uint(exp)) + } + typ = types.Typ[types.UntypedFloat] + val = mant + return + } + + typ = types.Typ[types.UntypedInt] + val = mant + return +} + +// ConstDecl = "const" ExportedName [ Type ] "=" Literal . +// Literal = bool_lit | int_lit | float_lit | complex_lit | rune_lit | string_lit . +// bool_lit = "true" | "false" . +// complex_lit = "(" float_lit "+" float_lit "i" ")" . +// rune_lit = "(" int_lit "+" int_lit ")" . +// string_lit = `"` { unicode_char } `"` . 
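+// For example (illustrative): const @"math".MaxInt8 = 127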
+// +func (p *parser) parseConstDecl() { + p.expectKeyword("const") + pkg, name := p.parseExportedName() + + var typ0 types.Type + if p.tok != '=' { + // constant types are never structured - no need for parent type + typ0 = p.parseType(nil) + } + + p.expect('=') + var typ types.Type + var val constant.Value + switch p.tok { + case scanner.Ident: + // bool_lit + if p.lit != "true" && p.lit != "false" { + p.error("expected true or false") + } + typ = types.Typ[types.UntypedBool] + val = constant.MakeBool(p.lit == "true") + p.next() + + case '-', scanner.Int: + // int_lit + typ, val = p.parseNumber() + + case '(': + // complex_lit or rune_lit + p.next() + if p.tok == scanner.Char { + p.next() + p.expect('+') + typ = types.Typ[types.UntypedRune] + _, val = p.parseNumber() + p.expect(')') + break + } + _, re := p.parseNumber() + p.expect('+') + _, im := p.parseNumber() + p.expectKeyword("i") + p.expect(')') + typ = types.Typ[types.UntypedComplex] + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + case scanner.Char: + // rune_lit + typ = types.Typ[types.UntypedRune] + val = constant.MakeFromLiteral(p.lit, token.CHAR, 0) + p.next() + + case scanner.String: + // string_lit + typ = types.Typ[types.UntypedString] + val = constant.MakeFromLiteral(p.lit, token.STRING, 0) + p.next() + + default: + p.errorf("expected literal got %s", scanner.TokenString(p.tok)) + } + + if typ0 == nil { + typ0 = typ + } + + pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, name, typ0, val)) +} + +// TypeDecl = "type" ExportedName Type . +// +func (p *parser) parseTypeDecl() { + p.expectKeyword("type") + pkg, name := p.parseExportedName() + obj := declTypeName(pkg, name) + + // The type object may have been imported before and thus already + // have a type associated with it. We still need to parse the type + // structure, but throw it away if the object already has a type. + // This ensures that all imports refer to the same type object for + // a given type declaration. + typ := p.parseType(pkg) + + if name := obj.Type().(*types.Named); name.Underlying() == nil { + name.SetUnderlying(typ) + } +} + +// VarDecl = "var" ExportedName Type . +// +func (p *parser) parseVarDecl() { + p.expectKeyword("var") + pkg, name := p.parseExportedName() + typ := p.parseType(pkg) + pkg.Scope().Insert(types.NewVar(token.NoPos, pkg, name, typ)) +} + +// Func = Signature [ Body ] . +// Body = "{" ... "}" . +// +func (p *parser) parseFunc(recv *types.Var) *types.Signature { + sig := p.parseSignature(recv) + if p.tok == '{' { + p.next() + for i := 1; i > 0; p.next() { + switch p.tok { + case '{': + i++ + case '}': + i-- + } + } + } + return sig +} + +// MethodDecl = "func" Receiver Name Func . +// Receiver = "(" ( identifier | "?" ) [ "*" ] ExportedName ")" . +// +func (p *parser) parseMethodDecl() { + // "func" already consumed + p.expect('(') + recv, _ := p.parseParameter() // receiver + p.expect(')') + + // determine receiver base type object + base := deref(recv.Type()).(*types.Named) + + // parse method name, signature, and possibly inlined body + _, name := p.parseName(nil, false) + sig := p.parseFunc(recv) + + // methods always belong to the same package as the base type object + pkg := base.Obj().Pkg() + + // add method to type unless type was imported before + // and method exists already + // TODO(gri) This leads to a quadratic algorithm - ok for now because method counts are small. + base.AddMethod(types.NewFunc(token.NoPos, pkg, name, sig)) +} + +// FuncDecl = "func" ExportedName Func . 
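+// For example (illustrative): func @"strings".Repeat (? string, ? int) string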
+//
+func (p *parser) parseFuncDecl() {
+	// "func" already consumed
+	pkg, name := p.parseExportedName()
+	typ := p.parseFunc(nil)
+	pkg.Scope().Insert(types.NewFunc(token.NoPos, pkg, name, typ))
+}
+
+// Decl = [ ImportDecl | ConstDecl | TypeDecl | VarDecl | FuncDecl | MethodDecl ] "\n" .
+//
+func (p *parser) parseDecl() {
+	if p.tok == scanner.Ident {
+		switch p.lit {
+		case "import":
+			p.parseImportDecl()
+		case "const":
+			p.parseConstDecl()
+		case "type":
+			p.parseTypeDecl()
+		case "var":
+			p.parseVarDecl()
+		case "func":
+			p.next() // look ahead
+			if p.tok == '(' {
+				p.parseMethodDecl()
+			} else {
+				p.parseFuncDecl()
+			}
+		}
+	}
+	p.expect('\n')
+}
+
+// ----------------------------------------------------------------------------
+// Export
+
+// Export        = PackageClause { Decl } "$$" .
+// PackageClause = "package" PackageName [ "safe" ] "\n" .
+//
+func (p *parser) parseExport() *types.Package {
+	p.expectKeyword("package")
+	name := p.parsePackageName()
+	if p.tok == scanner.Ident && p.lit == "safe" {
+		// package was compiled with -u option - ignore
+		p.next()
+	}
+	p.expect('\n')
+
+	pkg := p.getPkg(p.id, name)
+
+	for p.tok != '$' && p.tok != scanner.EOF {
+		p.parseDecl()
+	}
+
+	if ch := p.scanner.Peek(); p.tok != '$' || ch != '$' {
+		// don't call next()/expect() since reading past the
+		// export data may cause scanner errors (e.g. NUL chars)
+		p.errorf("expected '$$', got %s %c", scanner.TokenString(p.tok), ch)
+	}
+
+	if n := p.scanner.ErrorCount; n != 0 {
+		p.errorf("expected no scanner errors, got %d", n)
+	}
+
+	// Record all locally referenced packages as imports.
+	var imports []*types.Package
+	for id, pkg2 := range p.localPkgs {
+		if pkg2.Name() == "" {
+			p.errorf("%s package has no name", id)
+		}
+		if id == p.id {
+			continue // avoid self-edge
+		}
+		imports = append(imports, pkg2)
+	}
+	sort.Sort(byPath(imports))
+	pkg.SetImports(imports)
+
+	// package was imported completely and without errors
+	pkg.MarkComplete()
+
+	return pkg
+}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int           { return len(a) }
+func (a byPath) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
new file mode 100644
index 000000000..4be32a2e5
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iexport.go
@@ -0,0 +1,739 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed binary package export.
+// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go;
+// see that file for specification of the format.
+
+package gcimporter
+
+import (
+	"bytes"
+	"encoding/binary"
+	"go/ast"
+	"go/constant"
+	"go/token"
+	"go/types"
+	"io"
+	"math/big"
+	"reflect"
+	"sort"
+)
+
+// Current indexed export format version. Increase with each format change.
+// 0: Go1.11 encoding
+const iexportVersion = 0
+
+// IExportData returns the binary export data for pkg.
+//
+// If no file set is provided, position info will be missing.
+// The package path of the top-level package will not be recorded,
+// so that calls to IImportData can override with a provided package path.
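+//
+// A minimal usage sketch (assumes pkg is a complete, type-checked
+// *types.Package; fset may be nil at the cost of position info):
+//
+//	data, err := IExportData(fset, pkg)
+//
+// The resulting bytes can later be consumed by IImportData.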
+func IExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
+	defer func() {
+		if e := recover(); e != nil {
+			if ierr, ok := e.(internalError); ok {
+				err = ierr
+				return
+			}
+			// Not an internal error; panic again.
+			panic(e)
+		}
+	}()
+
+	p := iexporter{
+		out:         bytes.NewBuffer(nil),
+		fset:        fset,
+		allPkgs:     map[*types.Package]bool{},
+		stringIndex: map[string]uint64{},
+		declIndex:   map[types.Object]uint64{},
+		typIndex:    map[types.Type]uint64{},
+		localpkg:    pkg,
+	}
+
+	for i, pt := range predeclared() {
+		p.typIndex[pt] = uint64(i)
+	}
+	if len(p.typIndex) > predeclReserved {
+		panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved))
+	}
+
+	// Initialize work queue with exported declarations.
+	scope := pkg.Scope()
+	for _, name := range scope.Names() {
+		if ast.IsExported(name) {
+			p.pushDecl(scope.Lookup(name))
+		}
+	}
+
+	// Loop until no more work.
+	for !p.declTodo.empty() {
+		p.doDecl(p.declTodo.popHead())
+	}
+
+	// Append indices to data0 section.
+	dataLen := uint64(p.data0.Len())
+	w := p.newWriter()
+	w.writeIndex(p.declIndex)
+	w.flush()
+
+	// Assemble header.
+	var hdr intWriter
+	hdr.WriteByte('i')
+	hdr.uint64(iexportVersion)
+	hdr.uint64(uint64(p.strings.Len()))
+	hdr.uint64(dataLen)
+
+	// Flush output.
+	io.Copy(p.out, &hdr)
+	io.Copy(p.out, &p.strings)
+	io.Copy(p.out, &p.data0)
+
+	return p.out.Bytes(), nil
+}
+
+// writeIndex writes out the main object index, which is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[types.Object]uint64) {
+	// Build a map from packages to objects from that package.
+	pkgObjs := map[*types.Package][]types.Object{}
+
+	// For the main index, make sure to include every package that
+	// we reference, even if we're not exporting (or reexporting)
+	// any symbols from it.
+	pkgObjs[w.p.localpkg] = nil
+	for pkg := range w.p.allPkgs {
+		pkgObjs[pkg] = nil
+	}
+
+	for obj := range index {
+		pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], obj)
+	}
+
+	var pkgs []*types.Package
+	for pkg, objs := range pkgObjs {
+		pkgs = append(pkgs, pkg)
+
+		sort.Slice(objs, func(i, j int) bool {
+			return objs[i].Name() < objs[j].Name()
+		})
+	}
+
+	sort.Slice(pkgs, func(i, j int) bool {
+		return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j])
+	})
+
+	w.uint64(uint64(len(pkgs)))
+	for _, pkg := range pkgs {
+		w.string(w.exportPath(pkg))
+		w.string(pkg.Name())
+		w.uint64(uint64(0)) // package height is not needed for go/types
+
+		objs := pkgObjs[pkg]
+		w.uint64(uint64(len(objs)))
+		for _, obj := range objs {
+			w.string(obj.Name())
+			w.uint64(index[obj])
+		}
+	}
+}
+
+type iexporter struct {
+	fset *token.FileSet
+	out  *bytes.Buffer
+
+	localpkg *types.Package
+
+	// allPkgs tracks all packages that have been referenced by
+	// the export data, so we can ensure to include them in the
+	// main index.
+	allPkgs map[*types.Package]bool
+
+	declTodo objQueue
+
+	strings     intWriter
+	stringIndex map[string]uint64
+
+	data0     intWriter
+	declIndex map[types.Object]uint64
+	typIndex  map[types.Type]uint64
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
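+// Each entry is written as a uvarint length followed by the string's
+// bytes; for example (illustrative), "go" occupies three bytes:
+// 0x02 'g' 'o'.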
+func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + assert(obj.Pkg() != types.Unsafe) + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark n present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + panic(internalErrorf("unexpected method: %v", sig)) + } + w.tag('F') + w.pos(obj.Pos()) + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + break + } + + // Defined type. + w.tag('T') + w.pos(obj.Pos()) + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + t := obj.Type() + if types.IsInterface(t) { + break + } + + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedIdent(obj types.Object) { + // Ensure any referenced declarations are written out too. 
+ w.p.pushDecl(obj) + + w.string(obj.Name()) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + switch t := t.(type) { + case *types.Named: + w.startType(definedType) + w.qualifiedIdent(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + w.setPkg(pkg, true) + + n := t.NumFields() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) + w.typ(f.Type(), pkg) + w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Embedded(i) + w.pos(f.Obj().Pos()) + w.typ(f.Obj().Type(), f.Obj().Pkg()) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + + switch v.Kind() { + case constant.Bool: + w.bool(constant.BoolVal(v)) + case constant.Int: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case constant.Float: + f := constantToFloat(v) + w.mpfloat(f, typ) + case constant.Complex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), 
typ)
+	case constant.String:
+		w.string(constant.StringVal(v))
+	case constant.Unknown:
+		// package contains type errors
+	default:
+		panic(internalErrorf("unexpected value %v (%T)", v, v))
+	}
+}
+
+// constantToFloat converts a constant.Value with kind constant.Float to a
+// big.Float.
+func constantToFloat(x constant.Value) *big.Float {
+	assert(x.Kind() == constant.Float)
+	// Use the same floating-point precision (512) as cmd/compile
+	// (see Mpprec in cmd/compile/internal/gc/mpfloat.go).
+	const mpprec = 512
+	var f big.Float
+	f.SetPrec(mpprec)
+	if v, exact := constant.Float64Val(x); exact {
+		// float64
+		f.SetFloat64(v)
+	} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
+		// TODO(gri): add big.Rat accessor to constant.Value.
+		n := valueToRat(num)
+		d := valueToRat(denom)
+		f.SetRat(n.Quo(n, d))
+	} else {
+		// Value too large to represent as a fraction => inaccessible.
+		// TODO(gri): add big.Float accessor to constant.Value.
+		_, ok := f.SetString(x.ExactString())
+		assert(ok)
+	}
+	return &f
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2; while bytes 255, 254, and 253 indicate that a 1-,
+// 2-, or 3-byte big-endian string follows.
+//
+// The encoding for signed types uses the same general approach as for
+// unsigned types, except that small values use zig-zag encoding and the
+// bottom bit of the length prefix byte for large values is reserved as a
+// sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
+//
+// TODO(mdempsky): Is this level of complexity really worthwhile?
+func (w *exportWriter) mpint(x *big.Int, typ types.Type) {
+	basic, ok := typ.Underlying().(*types.Basic)
+	if !ok {
+		panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying()))
+	}
+
+	signed, maxBytes := intSize(basic)
+
+	negative := x.Sign() < 0
+	if !signed && negative {
+		panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x))
+	}
+
+	b := x.Bytes()
+	if len(b) > 0 && b[0] == 0 {
+		panic(internalErrorf("leading zeros"))
+	}
+	if uint(len(b)) > maxBytes {
+		panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x))
+	}
+
+	maxSmall := 256 - maxBytes
+	if signed {
+		maxSmall = 256 - 2*maxBytes
+	}
+	if maxBytes == 1 {
+		maxSmall = 256
+	}
+
+	// Check if x can use small value encoding.
+	if len(b) <= 1 {
+		var ux uint
+		if len(b) == 1 {
+			ux = uint(b[0])
+		}
+		if signed {
+			ux <<= 1
+			if negative {
+				ux--
+			}
+		}
+		if ux < maxSmall {
+			w.data.WriteByte(byte(ux))
+			return
+		}
+	}
+
+	n := 256 - uint(len(b))
+	if signed {
+		n = 256 - 2*uint(len(b))
+		if negative {
+			n |= 1
+		}
+	}
+	if n < maxSmall || n >= 256 {
+		panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n))
+	}
+
+	w.data.WriteByte(byte(n))
+	w.data.Write(b)
+}
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer.
The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. + prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go new file mode 100644 index 000000000..a31a88026 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/iimport.go @@ -0,0 +1,630 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
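+//
+// Rough layout of the stream consumed here, as assembled by IExportData in
+// iexport.go (an informal sketch; integers are uvarint-encoded):
+//
+//	'i' version stringLen dataLen stringData declData mainIndex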
+ +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "sort" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType +) + +// IImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + const currentVersion = 1 + version := int64(-1) + defer func() { + if e := recover(); e != nil { + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + r := &intReader{bytes.NewReader(data), path} + + version = int64(r.uint64()) + switch version { + case currentVersion, 0: + default: + errorf("unknown iexport format version %d", version) + } + + sLen := int64(r.uint64()) + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + declData := data[whence+sLen : whence+sLen+dLen] + r.Seek(sLen+dLen, io.SeekCurrent) + + p := iimporter{ + ipath: path, + version: int(version), + + stringData: stringData, + stringCache: make(map[uint64]string), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*token.File), + }, + } + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + pkg := imports[pkgPath] + if pkg == nil { + pkg = types.NewPackage(pkgPath, pkgName) + imports[pkgPath] = pkg + } else if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + + p.pkgCache[pkgPathOff] = pkg + + nameIndex := make(map[string]uint64) + for nSyms := r.uint64(); nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + p.ipkg = pkgList[0] + names := make([]string, 0, len(p.pkgIndex[p.ipkg])) + for name := range p.pkgIndex[p.ipkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(p.ipkg, name) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced 
packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) + sort.Sort(byPath(list)) + p.ipkg.SetImports(list) + + // package was imported completely and without errors + p.ipkg.MarkComplete() + + consumed, _ := r.Seek(0, io.SeekCurrent) + return int(consumed), p.ipkg, nil +} + +type iimporter struct { + ipath string + ipkg *types.Package + version int + + stringData []byte + stringCache map[uint64]string + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + + fake fakeFileSet + interfaceList []*types.Interface +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + if path == p.ipath { + return p.ipkg + } + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if base == nil || !isInterface(t) { + p.typCache[off] = t + } + return t +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F': + sig := r.signature(nil) + + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T': + // Types can be recursive. We need to setup a stub + // declaration before recursing. 
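+		// For example (illustrative), type List struct { next *List }
+		// refers to itself; the stub lets that inner reference resolve
+		// before the underlying type is fully read.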
+ obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + r.declare(obj) + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + msig := r.signature(recv) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + val = r.mpint(b) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(b *types.Basic) constant.Value { + signed, maxBytes := intSize(b) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + return constant.MakeInt64(v) + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + + buf := make([]byte, v) + io.ReadFull(&r.declReader, buf) + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 { + buf[i], buf[j] = buf[j], buf[i] + } + + x := constant.MakeFromBytes(buf) + if signed && n&1 != 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +func (r *importReader) mpfloat(b *types.Basic) constant.Value { + x := r.mpint(b) + if constant.Sign(x) == 0 { + return x + } + + exp := r.int64() + switch { + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + case exp < 0: + d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + } + return x +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.version >= 1 { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && 
r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) types.Type { + switch k := r.kind(); k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + + msig := r.signature(recv) + methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types.Var) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return types.NewSignature(recv, params, results, variadic) +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) 
+} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go new file mode 100644 index 000000000..463f25227 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface10.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go new file mode 100644 index 000000000..ab28b95cb --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/newInterface11.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 000000000..db0c9a7ea --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,174 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesdriver fetches type sizes for go/packages and go/analysis. +package packagesdriver + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/types" + "log" + "os" + "os/exec" + "strings" + "time" +) + +var debug = false + +// GetSizes returns the sizes used by the underlying driver with the given parameters. +func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { + // TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver. 
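+	// Driver selection is controlled by the environment; illustrative
+	// values (the binary path here is hypothetical):
+	//
+	//	GOPACKAGESDRIVER=/path/to/gopackagesdriver  use that driver binary
+	//	GOPACKAGESDRIVER=off                        force the 'go list' fallback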
+	const toolPrefix = "GOPACKAGESDRIVER="
+	tool := ""
+	for _, env := range env {
+		if val := strings.TrimPrefix(env, toolPrefix); val != env {
+			tool = val
+		}
+	}
+
+	if tool == "" {
+		var err error
+		tool, err = exec.LookPath("gopackagesdriver")
+		if err != nil {
+			// We did not find the driver, so use "go list".
+			tool = "off"
+		}
+	}
+
+	if tool == "off" {
+		return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData)
+	}
+
+	req, err := json.Marshal(struct {
+		Command    string   `json:"command"`
+		Env        []string `json:"env"`
+		BuildFlags []string `json:"build_flags"`
+	}{
+		Command:    "sizes",
+		Env:        env,
+		BuildFlags: buildFlags,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+	}
+
+	buf := new(bytes.Buffer)
+	cmd := exec.CommandContext(ctx, tool)
+	cmd.Dir = dir
+	cmd.Env = env
+	cmd.Stdin = bytes.NewReader(req)
+	cmd.Stdout = buf
+	cmd.Stderr = new(bytes.Buffer)
+	if err := cmd.Run(); err != nil {
+		return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+	}
+	var response struct {
+		// Sizes, if not nil, is the types.Sizes to use when type checking.
+		Sizes *types.StdSizes
+	}
+	if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+		return nil, err
+	}
+	return response.Sizes, nil
+}
+
+func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
+	args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
+	args = append(args, buildFlags...)
+	args = append(args, "--", "unsafe")
+	stdout, stderr, err := invokeGo(ctx, env, dir, usesExportData, args...)
+	var goarch, compiler string
+	if err != nil {
+		if strings.Contains(err.Error(), "cannot find main module") {
+			// User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc.
+			// TODO(matloob): Is this a problem in practice?
+			envout, _, enverr := invokeGo(ctx, env, dir, usesExportData, "env", "GOARCH")
+			if enverr != nil {
+				return nil, err
+			}
+			goarch = strings.TrimSpace(envout.String())
+			compiler = "gc"
+		} else {
+			return nil, err
+		}
+	} else {
+		fields := strings.Fields(stdout.String())
+		if len(fields) < 2 {
+			return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\" from stdout of go command:\n%s\ndir: %s\nstdout: <<%s>>\nstderr: <<%s>>",
+				cmdDebugStr(env, args...), dir, stdout.String(), stderr.String())
+		}
+		goarch = fields[0]
+		compiler = fields[1]
+	}
+	return types.SizesFor(compiler, goarch), nil
+}
+
+// invokeGo returns the stdout and stderr of a go command invocation.
+func invokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, *bytes.Buffer, error) {
+	if debug {
+		defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
+	}
+	stdout := new(bytes.Buffer)
+	stderr := new(bytes.Buffer)
+	cmd := exec.CommandContext(ctx, "go", args...)
+	// On darwin the cwd gets resolved to the real path, which breaks anything that
+	// expects the working directory to keep the original path, including the
+	// go command when dealing with modules.
+	// The Go stdlib has a special feature where if the cwd and the PWD are the
+	// same node then it trusts the PWD, so by setting it in the env for the child
+	// process we fix up all the paths returned by the go command.
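+	// (For example, on darwin /tmp is a symlink to /private/tmp; without
+	// PWD, paths reported by the go command would use the resolved form.)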
+	cmd.Env = append(append([]string{}, env...), "PWD="+dir)
+	cmd.Dir = dir
+	cmd.Stdout = stdout
+	cmd.Stderr = stderr
+	if err := cmd.Run(); err != nil {
+		exitErr, ok := err.(*exec.ExitError)
+		if !ok {
+			// Catastrophic error:
+			// - executable not found
+			// - context cancellation
+			return nil, nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
+		}
+
+		// Export mode entails a build.
+		// If that build fails, errors appear on stderr
+		// (despite the -e flag) and the Export field is blank.
+		// Do not fail in that case.
+		if !usesExportData {
+			return nil, nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
+		}
+	}
+
+	// As of writing, go list -export prints some non-fatal compilation
+	// errors to stderr, even with -e set. We would prefer that it put
+	// them in the Package.Error JSON (see https://golang.org/issue/26319).
+	// In the meantime, there's nowhere good to put them, but they can
+	// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
+	// is set.
+	if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
+		fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr)
+	}
+
+	// debugging
+	if false {
+		fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
+	}
+
+	return stdout, stderr, nil
+}
+
+func cmdDebugStr(envlist []string, args ...string) string {
+	env := make(map[string]string)
+	for _, kv := range envlist {
+		split := strings.Split(kv, "=")
+		k, v := split[0], split[1]
+		env[k] = v
+	}
+
+	return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
+}
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
new file mode 100644
index 000000000..3799f8ed8
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/doc.go
@@ -0,0 +1,222 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package packages loads Go packages for inspection and analysis.
+
+The Load function takes as input a list of patterns and returns a list of Package
+structs describing individual packages matched by those patterns.
+The LoadMode controls the amount of detail in the loaded packages.
+
+Load passes most patterns directly to the underlying build tool,
+but all patterns with the prefix "query=", where query is a
+non-empty string of letters from [a-z], are reserved and may be
+interpreted as query operators.
+
+Two query operators are currently supported: "file" and "pattern".
+
+The query "file=path/to/file.go" matches the package or packages enclosing
+the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
+might return the packages "fmt" and "fmt [fmt.test]".
+
+The query "pattern=string" causes "string" to be passed directly to
+the underlying build tool. In most cases this is unnecessary,
+but an application can use Load("pattern=" + x) as an escaping mechanism
+to ensure that x is not interpreted as a query operator if it contains '='.
+
+All other query operators are reserved for future use and currently
+cause Load to report an error.
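+
+For instance (an illustrative sketch, not a complete program),
+
+	pkgs, err := Load(nil, "file=path/to/file.go")
+
+loads the package or packages enclosing path/to/file.go.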
+ +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypeInfo, the result of a complete type-check of the package syntax trees. + +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages, with each mode returning all the data of the +previous mode with some extra added. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. + +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. 
+
+Loading packages always returns the complete import graph "all the way down",
+even if all you want is information about a single package, because the query
+mechanisms of all the build systems we currently support ({go,vgo} list, and
+blaze/bazel aspect-based query) cannot provide detailed information
+about one package without visiting all its dependencies too, so there is
+no additional asymptotic cost to providing transitive information.
+(This property might not be true of a hypothetical 5th build system.)
+
+In calls to TypeCheck, all initial packages, and any package that
+transitively depends on one of them, must be loaded from source.
+Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
+source; D may be loaded from export data, and E may not be loaded at all
+(though it's possible that D's export data mentions it, so a
+types.Package may be created for it and exposed.)
+
+The old loader had a feature to suppress type-checking of function
+bodies on a per-package basis, primarily intended to reduce the work of
+obtaining type information for imported packages. Now that imports are
+satisfied by export data, the optimization no longer seems necessary.
+
+Despite some early attempts, the old loader did not exploit export data,
+instead always using the equivalent of WholeProgram mode. This was due
+to the complexity of mixing source and export data packages (now
+resolved by the upward traversal mentioned above), and because export data
+files were nearly always missing or stale. Now that 'go build' supports
+caching, all the underlying build systems can guarantee to produce
+export data in a reasonable (amortized) time.
+
+Test "main" packages synthesized by the build system are now reported as
+first-class packages, avoiding the need for clients (such as go/ssa) to
+reinvent this generation logic.
+
+One way in which go/packages is simpler than the old loader is in its
+treatment of in-package tests. In-package tests are packages that
+consist of all the files of the library under test, plus the test files.
+The old loader constructed in-package tests by a two-phase process of
+mutation called "augmentation": first it would construct and type-check
+all the ordinary library packages and type-check the packages that
+depend on them; then it would add more (test) files to the package and
+type-check again. This two-phase approach had four major problems:
+1) in processing the tests, the loader modified the library package,
+   leaving no way for a client application to see both the test
+   package and the library package; one would mutate into the other.
+2) because test files can declare additional methods on types defined in
+   the library portion of the package, the dispatch of method calls in
+   the library portion was affected by the presence of the test files.
+   This should have been a clue that the packages were logically
+   different.
+3) this model of "augmentation" assumed at most one in-package test
+   per library package, which is true of projects using 'go build',
+   but not other build systems.
+4) because of the two-phase nature of test processing, all packages that
+   import the library package had to be processed before augmentation,
+   forcing a "one-shot" API and preventing the client from calling Load
+   several times in sequence as is now possible in WholeProgram mode.
+   (TypeCheck mode has a similar one-shot restriction for a different reason.)
+
+Early drafts of this package supported "multi-shot" operation.
+Although it allowed clients to make a sequence of calls (or concurrent
+calls) to Load, building up the graph of Packages incrementally,
+it was of marginal value: it complicated the API
+(since it allowed some options to vary across calls but not others),
+it complicated the implementation,
+it cannot be made to work in Types mode, as explained above,
+and it was less efficient than making one combined call (when this is possible).
+Among the clients we have inspected, none made multiple calls to Load
+but could not be easily and satisfactorily modified to make only a single call.
+However, application changes may be required.
+For example, the ssadump command loads the user-specified packages
+and in addition the runtime package. It is tempting to simply append
+"runtime" to the user-provided list, but that does not work if the user
+specified an ad-hoc package such as [a.go b.go].
+Instead, ssadump no longer requests the runtime package,
+but seeks it among the dependencies of the user-specified packages,
+and emits an error if it is not found.
+
+Overlays: The Overlay field in the Config allows providing alternate contents
+for Go source files, as a mapping from file path to contents.
+go/packages will pull in new imports added in overlay files when go/packages
+is run in LoadImports mode or greater.
+Overlay support for the go list driver isn't complete yet: if the file doesn't
+exist on disk, it will only be recognized in an overlay if it is a non-test file
+and the package would be reported even without the overlay.
+
+Questions & Tasks
+
+- Add GOARCH/GOOS?
+  They are not portable concepts, but could be made portable.
+  Our goal has been to allow users to express themselves using the conventions
+  of the underlying build system: if the build system honors GOARCH
+  during a build and during a metadata query, then so should
+  applications built atop that query mechanism.
+  Conversely, if the target architecture of the build is determined by
+  command-line flags, the application can pass the relevant
+  flags through to the build system using a command such as:
+    myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
+  However, this approach is low-level, unwieldy, and non-portable.
+  GOOS and GOARCH seem important enough to warrant a dedicated option.
+
+- How should we handle partial failures such as a mixture of good and
+  malformed patterns, existing and non-existent packages, successful and
+  failed builds, import failures, import cycles, and so on, in a call to
+  Load?
+
+- Support bazel, blaze, and go1.10 list, not just go1.11 list.
+
+- Handle (and test) various partial success cases, e.g.
+  a mixture of good packages and:
+  invalid patterns
+  nonexistent packages
+  empty packages
+  packages with malformed package or import declarations
+  unreadable files
+  import cycles
+  other parse errors
+  type errors
+  Make sure we record errors at the correct place in the graph.
+
+- Missing packages among initial arguments are not reported.
+  Return bogus packages for them, like golist does.
+
+- "undeclared name" errors (for example) are reported out of source file
+  order. I suspect this is due to the breadth-first resolution now used
+  by go/types. Is that a bug? Discuss with gri.
+
+*/
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
new file mode 100644
index 000000000..6ac3e4f5b
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -0,0 +1,100 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file enables an external tool to intercept package requests.
+// If the tool is present then its results are used in preference to
+// the go list command.
+
+package packages
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+)
+
+// The Driver Protocol
+//
+// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
+// This allows for different build systems to support go/packages by telling go/packages how the
+// packages' source is organized.
+// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
+// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
+// documentation in doc.go for the full description of the patterns that need to be supported.
+// A driver receives a JSON-serialized driverRequest struct on standard input and must produce
+// a JSON-serialized driverResponse (see definition in packages.go) on its standard output.
+
+// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
+type driverRequest struct {
+	Mode LoadMode `json:"mode"`
+	// Env specifies the environment the underlying build system should be run in.
+	Env []string `json:"env"`
+	// BuildFlags are flags that should be passed to the underlying build system.
+	BuildFlags []string `json:"build_flags"`
+	// Tests specifies whether the patterns should also return test packages.
+	Tests bool `json:"tests"`
+	// Overlay maps file paths (relative to the driver's working directory) to the byte contents
+	// of overlay files.
+	Overlay map[string][]byte `json:"overlay"`
+}
+
+// findExternalDriver returns a driver that invokes an external tool to supply
+// the build system package structure, or nil if no such tool is found.
+// If GOPACKAGESDRIVER is set in the environment, findExternalDriver uses its
+// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
+func findExternalDriver(cfg *Config) driver {
+	const toolPrefix = "GOPACKAGESDRIVER="
+	tool := ""
+	for _, env := range cfg.Env {
+		if val := strings.TrimPrefix(env, toolPrefix); val != env {
+			tool = val
+		}
+	}
+	if tool != "" && tool == "off" {
+		return nil
+	}
+	if tool == "" {
+		var err error
+		tool, err = exec.LookPath("gopackagesdriver")
+		if err != nil {
+			return nil
+		}
+	}
+	return func(cfg *Config, words ...string) (*driverResponse, error) {
+		req, err := json.Marshal(driverRequest{
+			Mode:       cfg.Mode,
+			Env:        cfg.Env,
+			BuildFlags: cfg.BuildFlags,
+			Tests:      cfg.Tests,
+			Overlay:    cfg.Overlay,
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
+		}
+
+		buf := new(bytes.Buffer)
+		stderr := new(bytes.Buffer)
+		cmd := exec.CommandContext(cfg.Context, tool, words...)
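+		// For illustration, the request marshaled into req above arrives on the
+		// driver's stdin roughly as follows (field values depend on the Config;
+		// this is a sketch, not a normative example):
+		//
+		//	{"mode":1,"env":["GOPATH=/home/user/go"],"build_flags":[],"tests":false,"overlay":{}}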
+		cmd.Dir = cfg.Dir
+		cmd.Env = cfg.Env
+		cmd.Stdin = bytes.NewReader(req)
+		cmd.Stdout = buf
+		cmd.Stderr = stderr
+		if err := cmd.Run(); err != nil {
+			return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
+		}
+		if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
+			fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
+		}
+		var response driverResponse
+		if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
+			return nil, err
+		}
+		return &response, nil
+	}
+}
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
new file mode 100644
index 000000000..648e36431
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -0,0 +1,1143 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packages
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"go/types"
+	"io/ioutil"
+	"log"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode"
+
+	"golang.org/x/tools/go/internal/packagesdriver"
+	"golang.org/x/tools/internal/gopathwalk"
+	"golang.org/x/tools/internal/semver"
+	"golang.org/x/tools/internal/span"
+)
+
+// debug controls verbose logging.
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
+
+// A goTooOldError reports that the go command
+// found by exec.LookPath is too old to use the new go list behavior.
+type goTooOldError struct {
+	error
+}
+
+// responseDeduper wraps a driverResponse, deduplicating its contents.
+type responseDeduper struct {
+	seenRoots    map[string]bool
+	seenPackages map[string]*Package
+	dr           *driverResponse
+}
+
+// init fills in r with a driverResponse.
+func (r *responseDeduper) init(dr *driverResponse) {
+	r.dr = dr
+	r.seenRoots = map[string]bool{}
+	r.seenPackages = map[string]*Package{}
+	for _, pkg := range dr.Packages {
+		r.seenPackages[pkg.ID] = pkg
+	}
+	for _, root := range dr.Roots {
+		r.seenRoots[root] = true
+	}
+}
+
+func (r *responseDeduper) addPackage(p *Package) {
+	if r.seenPackages[p.ID] != nil {
+		return
+	}
+	r.seenPackages[p.ID] = p
+	r.dr.Packages = append(r.dr.Packages, p)
+}
+
+func (r *responseDeduper) addRoot(id string) {
+	if r.seenRoots[id] {
+		return
+	}
+	r.seenRoots[id] = true
+	r.dr.Roots = append(r.dr.Roots, id)
+}
+
+// goInfo contains global information from the go tool.
+type goInfo struct {
+	rootDirs map[string]string
+	env      goEnv
+}
+
+type goEnv struct {
+	modulesOn bool
+}
+
+func determineEnv(cfg *Config) goEnv {
+	buf, err := invokeGo(cfg, "env", "GOMOD")
+	if err != nil {
+		return goEnv{}
+	}
+	gomod := bytes.TrimSpace(buf.Bytes())
+
+	env := goEnv{}
+	env.modulesOn = len(gomod) > 0
+	return env
+}
+
+// goListDriver uses the go list command to interpret the patterns and produce
+// the build system package structure.
+// See driver for more details.
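+// For reference, the invocation this driver builds (see golistargs below)
+// looks roughly like the following; the exact flags depend on cfg.Mode and
+// cfg.Tests (illustrative sketch):
+//
+//	go list -e -json -compiled=true -test=false -export=false -deps=true -find=false -- fmt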
+func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + var sizes types.Sizes + var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + sizes, sizeserr = getSizes(cfg) + sizeswg.Done() + }() + } + defer sizeswg.Wait() + + // start fetching rootDirs + var info goInfo + var rootDirsReady, envReady = make(chan struct{}), make(chan struct{}) + go func() { + info.rootDirs = determineRootDirs(cfg) + close(rootDirsReady) + }() + go func() { + info.env = determineEnv(cfg) + close(envReady) + }() + getGoInfo := func() *goInfo { + <-rootDirsReady + <-envReady + return &info + } + + // Ensure that we don't leak goroutines: Load is synchronous, so callers will + // not expect it to access the fields of cfg after the call returns. + defer getGoInfo() + + // always pass getGoInfo to golistDriver + golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { + return golistDriver(cfg, getGoInfo, patterns...) + } + + // Determine files requested in contains patterns + var containFiles []string + var packagesNamed []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. +extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "iamashamedtousethedisabledqueryname": + packagesNamed = append(packagesNamed, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + response := &responseDeduper{} + var err error + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := golistDriver(cfg, restPatterns...) + if err != nil { + return nil, err + } + response.init(dr) + } else { + response.init(&driverResponse{}) + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + // types.SizesFor always returns nil or a *types.StdSizes + response.dr.Sizes, _ = sizes.(*types.StdSizes) + + var containsCandidates []string + + if len(containFiles) != 0 { + if err := runContainsQueries(cfg, golistDriver, response, containFiles, getGoInfo); err != nil { + return nil, err + } + } + + if len(packagesNamed) != 0 { + if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { + return nil, err + } + } + + modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + if err != nil { + return nil, err + } + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) 
+ } + if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getGoInfo); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{ + { + Kind: ListError, + Msg: fmt.Sprintf("package %s expected but not seen", id), + }, + }, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + + return response.dr, nil +} + +func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getGoInfo func() *goInfo) error { + if len(pkgs) == 0 { + return nil + } + drivercfg := *cfg + if getGoInfo().env.modulesOn { + drivercfg.BuildFlags = append(drivercfg.BuildFlags, "-mod=readonly") + } + dr, err := driver(&drivercfg, pkgs...) + + if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + if err != nil { + return err + } + return addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo) +} + +func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. + fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := driver(cfg, pattern) + if err != nil { + var queryErr error + if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil { + return err // return the original error + } + } + // `go list` can report errors for files that are not listed as part of a package's GoFiles. + // In the case of an invalid Go file, we should assume that it is part of package if only + // one package is in the response. The file may have valid contents in an overlay. + if len(dirResponse.Packages) == 1 { + pkg := dirResponse.Packages[0] + for i, err := range pkg.Errors { + s := errorSpan(err) + if !s.IsValid() { + break + } + if len(pkg.CompiledGoFiles) == 0 { + break + } + dir := filepath.Dir(pkg.CompiledGoFiles[0]) + filename := filepath.Join(dir, filepath.Base(s.URI().Filename())) + if info, err := os.Stat(filename); err != nil || info.IsDir() { + break + } + if !contains(pkg.CompiledGoFiles, filename) { + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) + pkg.GoFiles = append(pkg.GoFiles, filename) + pkg.Errors = append(pkg.Errors[:i], pkg.Errors[i+1:]...) + } + } + } + // A final attempt to construct an ad-hoc package. 
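+		// (An ad-hoc package here means e.g. a lone Go file outside any module
+		// or GOPATH entry; go list reports such loads under the synthetic ID
+		// "command-line-arguments", which adHocPackage below also uses.)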
+ if len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1 { + var queryErr error + if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil { + return err // return the original error + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// adHocPackage attempts to construct an ad-hoc package given a query that failed. +func adHocPackage(cfg *Config, driver driver, pattern, query string) (*driverResponse, error) { + // There was an error loading the package. Try to load the file as an ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're in modules mode + // and the ad-hoc is located outside a module. + dirResponse, err := driver(cfg, query) + if err != nil { + return nil, err + } + // If we get nothing back from `go list`, try to make this file into its own ad-hoc package. + if len(dirResponse.Packages) == 0 && err == nil { + dirResponse.Packages = append(dirResponse.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments") + } + // Special case to handle issue #33482: + // If this is a file= query for ad-hoc packages where the file only exists on an overlay, + // and exists outside of a module, add the file in for the package. + if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) { + if len(dirResponse.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range cfg.Overlay { + if path == filename { + dirResponse.Packages[0].Errors = nil + dirResponse.Packages[0].GoFiles = []string{path} + dirResponse.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + return dirResponse, nil +} + +func contains(files []string, filename string) bool { + for _, f := range files { + if f == filename { + return true + } + } + return false +} + +// errorSpan attempts to parse a standard `go list` error message +// by stripping off the trailing error message. +// +// It works only on errors whose message is prefixed by colon, +// followed by a space (": "). For example: +// +// attributes.go:13:1: expected 'package', found 'type' +// +func errorSpan(err Error) span.Span { + if err.Pos == "" { + input := strings.TrimSpace(err.Msg) + msgIndex := strings.Index(input, ": ") + if msgIndex < 0 { + return span.Parse(input) + } + return span.Parse(input[:msgIndex]) + } + return span.Parse(err.Pos) +} + +// modCacheRegexp splits a path in a module cache into module, module version, and package. 
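+// For example (illustrative): "github.com/foo/bar@v1.2.3/baz" splits into
+// "github.com/foo/bar", "v1.2.3", and "/baz".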
+var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) + +func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { + // calling `go env` isn't free; bail out if there's nothing to do. + if len(queries) == 0 { + return nil + } + // Determine which directories are relevant to scan. + roots, modRoot, err := roots(cfg) + if err != nil { + return err + } + + // Scan the selected directories. Simple matches, from GOPATH/GOROOT + // or the local module, can simply be "go list"ed. Matches from the + // module cache need special treatment. + var matchesMu sync.Mutex + var simpleMatches, modCacheMatches []string + add := func(root gopathwalk.Root, dir string) { + // Walk calls this concurrently; protect the result slices. + matchesMu.Lock() + defer matchesMu.Unlock() + + path := dir + if dir != root.Path { + path = dir[len(root.Path)+1:] + } + if pathMatchesQueries(path, queries) { + switch root.Type { + case gopathwalk.RootModuleCache: + modCacheMatches = append(modCacheMatches, path) + case gopathwalk.RootCurrentModule: + // We'd need to read go.mod to find the full + // import path. Relative's easier. + rel, err := filepath.Rel(cfg.Dir, dir) + if err != nil { + // This ought to be impossible, since + // we found dir in the current module. + panic(err) + } + simpleMatches = append(simpleMatches, "./"+rel) + case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: + simpleMatches = append(simpleMatches, path) + } + } + } + + startWalk := time.Now() + gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug}) + cfg.Logf("%v for walk", time.Since(startWalk)) + + // Weird special case: the top-level package in a module will be in + // whatever directory the user checked the repository out into. It's + // more reasonable for that to not match the package name. So, if there + // are any Go files in the mod root, query it just to be safe. + if modRoot != "" { + rel, err := filepath.Rel(cfg.Dir, modRoot) + if err != nil { + panic(err) // See above. + } + + files, err := ioutil.ReadDir(modRoot) + if err != nil { + panic(err) // See above. + } + + for _, f := range files { + if strings.HasSuffix(f.Name(), ".go") { + simpleMatches = append(simpleMatches, rel) + break + } + } + } + + addResponse := func(r *driverResponse) { + for _, pkg := range r.Packages { + response.addPackage(pkg) + for _, name := range queries { + if pkg.Name == name { + response.addRoot(pkg.ID) + break + } + } + } + } + + if len(simpleMatches) != 0 { + resp, err := driver(cfg, simpleMatches...) + if err != nil { + return err + } + addResponse(resp) + } + + // Module cache matches are tricky. We want to avoid downloading new + // versions of things, so we need to use the ones present in the cache. + // go list doesn't accept version specifiers, so we have to write out a + // temporary module, and do the list in that module. + if len(modCacheMatches) != 0 { + // Collect all the matches, deduplicating by major version + // and preferring the newest. 
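+		// For example (illustrative): cache entries foo@v1.2.0 and foo@v1.3.0
+		// collapse to a single require of foo v1.3.0, while a foo/v2 entry is
+		// kept separately under its own major version.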
+ type modInfo struct { + mod string + major string + } + mods := make(map[modInfo]string) + var imports []string + for _, modPath := range modCacheMatches { + matches := modCacheRegexp.FindStringSubmatch(modPath) + mod, ver := filepath.ToSlash(matches[1]), matches[2] + importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) + + major := semver.Major(ver) + if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { + mods[modInfo{mod, major}] = ver + } + + imports = append(imports, importPath) + } + + // Build the temporary module. + var gomod bytes.Buffer + gomod.WriteString("module modquery\nrequire (\n") + for mod, version := range mods { + gomod.WriteString("\t" + mod.mod + " " + version + "\n") + } + gomod.WriteString(")\n") + + tmpCfg := *cfg + + // We're only trying to look at stuff in the module cache, so + // disable the network. This should speed things up, and has + // prevented errors in at least one case, #28518. + tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...) + + var err error + tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") + if err != nil { + return err + } + defer os.RemoveAll(tmpCfg.Dir) + + if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { + return fmt.Errorf("writing go.mod for module cache query: %v", err) + } + + // Run the query, using the import paths calculated from the matches above. + resp, err := driver(&tmpCfg, imports...) + if err != nil { + return fmt.Errorf("querying module cache matches: %v", err) + } + addResponse(resp) + } + + return nil +} + +func getSizes(cfg *Config) (types.Sizes, error) { + return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) +} + +// roots selects the appropriate paths to walk based on the passed-in configuration, +// particularly the environment and the presence of a go.mod in cfg.Dir's parents. +func roots(cfg *Config) ([]gopathwalk.Root, string, error) { + stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") + if err != nil { + return nil, "", err + } + + fields := strings.Split(stdout.String(), "\n") + if len(fields) != 4 || len(fields[3]) != 0 { + return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String()) + } + goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] + var modDir string + if gomod != "" { + modDir = filepath.Dir(gomod) + } + + var roots []gopathwalk.Root + // Always add GOROOT. + roots = append(roots, gopathwalk.Root{ + Path: filepath.Join(goroot, "/src"), + Type: gopathwalk.RootGOROOT, + }) + // If modules are enabled, scan the module dir. + if modDir != "" { + roots = append(roots, gopathwalk.Root{ + Path: modDir, + Type: gopathwalk.RootCurrentModule, + }) + } + // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. + for _, p := range gopath { + if modDir != "" { + roots = append(roots, gopathwalk.Root{ + Path: filepath.Join(p, "/pkg/mod"), + Type: gopathwalk.RootModuleCache, + }) + } else { + roots = append(roots, gopathwalk.Root{ + Path: filepath.Join(p, "/src"), + Type: gopathwalk.RootGOPATH, + }) + } + } + + return roots, modDir, nil +} + +// These functions were copied from goimports. See further documentation there. + +// pathMatchesQueries is adapted from pkgIsCandidate. +// TODO: is it reasonable to do Contains here, rather than an exact match on a path component? 
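+// For example (illustrative): the query "nettest" matches
+// "golang.org/x/net/internal/nettest" because its last two path components
+// contain the query, and "azuresdk" matches a path ending ".../Azure/azure-sdk"
+// once hyphens are dropped and upper-case ASCII letters are lowered.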
+func pathMatchesQueries(path string, queries []string) bool { + lastTwo := lastTwoComponents(path) + for _, query := range queries { + if strings.Contains(lastTwo, query) { + return true + } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, query) { + return true + } + } + } + return false +} + +// lastTwoComponents returns at most the last two path components +// of v, using either / or \ as the path separator. +func lastTwoComponents(v string) string { + nslash := 0 + for i := len(v) - 1; i >= 0; i-- { + if v[i] == '/' || v[i] == '\\' { + nslash++ + if nslash == 2 { + return v[i:] + } + } + } + return v +} + +func hasHyphenOrUpperASCII(s string) bool { + for i := 0; i < len(s); i++ { + b := s[i] + if b == '-' || ('A' <= b && b <= 'Z') { + return true + } + } + return false +} + +func lowerASCIIAndRemoveHyphen(s string) (ret string) { + buf := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == '-': + continue + case 'A' <= b && b <= 'Z': + buf = append(buf, b+('a'-'A')) + default: + buf = append(buf, b) + } + } + return string(buf) +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. +type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *jsonPackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// golistDriver uses the "go list" command to expand the pattern +// words and return metadata for the specified packages. dir may be +// "" and env may be nil, as per os/exec.Command. +func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + buf, err := invokeGo(cfg, golistargs(cfg, words)...) + if err != nil { + return nil, err + } + seen := make(map[string]*jsonPackage) + // Decode the JSON and convert it to Package form. + var response driverResponse + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. 
If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. + if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. + if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok := getPkgPath(cfg, p.ImportPath, rootsDirs) + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + // skip the duplicate + continue + } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Filter out any elements of CompiledGoFiles that are also in OtherFiles. + // We have to keep this workaround in place until go1.12 is a distant memory. + if len(pkg.OtherFiles) > 0 { + other := make(map[string]bool, len(pkg.OtherFiles)) + for _, f := range pkg.OtherFiles { + other[f] = true + } + + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if other[f] { + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. + if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.GoFiles = nil // ignore fake unsafe.go file + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. + ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? 
+		if len(pkg.CompiledGoFiles) == 0 {
+			pkg.CompiledGoFiles = pkg.GoFiles
+		}
+
+		if p.Error != nil {
+			pkg.Errors = append(pkg.Errors, Error{
+				Pos: p.Error.Pos,
+				Msg: strings.TrimSpace(p.Error.Err), // Trim to work around golang.org/issue/32363.
+			})
+		}
+
+		response.Packages = append(response.Packages, pkg)
+	}
+
+	return &response, nil
+}
+
+// getPkgPath finds the package path of a directory if it's relative to a root directory.
+func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) {
+	absDir, err := filepath.Abs(dir)
+	if err != nil {
+		cfg.Logf("error getting absolute path of %s: %v", dir, err)
+		return "", false
+	}
+	for rdir, rpath := range goInfo().rootDirs {
+		absRdir, err := filepath.Abs(rdir)
+		if err != nil {
+			cfg.Logf("error getting absolute path of %s: %v", rdir, err)
+			continue
+		}
+		// Make sure that the directory is in the module,
+		// to avoid creating a path relative to another module.
+		if !strings.HasPrefix(absDir, absRdir) {
+			cfg.Logf("%s does not have prefix %s", absDir, absRdir)
+			continue
+		}
+		// TODO(matloob): This doesn't properly handle symlinks.
+		r, err := filepath.Rel(rdir, dir)
+		if err != nil {
+			continue
+		}
+		if rpath != "" {
+			// We choose only one root even though the directory can belong to multiple
+			// modules or GOPATH entries. This is okay because we only need to work with
+			// absolute dirs when a file is missing from disk, for instance when gopls
+			// calls go/packages in an overlay.
+			// Once the file is saved, gopls or the next invocation of the tool will get
+			// the correct result straight from golist.
+			// TODO(matloob): Implement module tiebreaking?
+			return path.Join(rpath, filepath.ToSlash(r)), true
+		}
+		return filepath.ToSlash(r), true
+	}
+	return "", false
+}
+
+// absJoin absolutizes and flattens the lists of files.
+func absJoin(dir string, fileses ...[]string) (res []string) {
+	for _, files := range fileses {
+		for _, file := range files {
+			if !filepath.IsAbs(file) {
+				file = filepath.Join(dir, file)
+			}
+			res = append(res, file)
+		}
+	}
+	return res
+}
+
+func golistargs(cfg *Config, words []string) []string {
+	const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
+	fullargs := []string{
+		"list", "-e", "-json",
+		fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0),
+		fmt.Sprintf("-test=%t", cfg.Tests),
+		fmt.Sprintf("-export=%t", usesExportData(cfg)),
+		fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
+		// go list doesn't let you pass -test and -find together,
+		// probably because you'd just get the TestMain.
+		fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),
+	}
+	fullargs = append(fullargs, cfg.BuildFlags...)
+	fullargs = append(fullargs, "--")
+	fullargs = append(fullargs, words...)
+	return fullargs
+}
+
+// invokeGo returns the stdout of a go command invocation.
+func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
+	stdout := new(bytes.Buffer)
+	stderr := new(bytes.Buffer)
+	cmd := exec.CommandContext(cfg.Context, "go", args...)
+	// On darwin the cwd gets resolved to the real path, which breaks anything that
+	// expects the working directory to keep the original path, including the
+	// go command when dealing with modules.
+	// The Go stdlib has a special feature where if the cwd and the PWD are the
+	// same node then it trusts the PWD, so by setting it in the env for the child
+	// process we fix up all the paths returned by the go command.
+ cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir) + cmd.Dir = cfg.Dir + cmd.Stdout = stdout + cmd.Stderr = stderr + defer func(start time.Time) { + cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr, stdout) + }(time.Now()) + + if err := cmd.Run(); err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, fmt.Errorf("%s", stderr.String()) + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. + isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") { + return stdout, nil + } + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Similar to the previous error, but currently lacks a fix in Go. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. + // If the package doesn't exist, put the absolute path of the directory into the error message, + // as Go 1.13 list does. 
+		const noSuchDirectory = "no such directory"
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
+			errstr := stderr.String()
+			abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				abspath, strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
+		// Note that the error message we look for in this case is different from the one looked for above.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
+			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
+		// directory outside any module.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				// TODO(matloob): command-line-arguments isn't correct here.
+				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Another variation of the previous error
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				// TODO(matloob): command-line-arguments isn't correct here.
+				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
+		// status if there's a dependency on a package that doesn't exist. But it should return
+		// a zero exit status and set an error on that package.
+		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
+			// Don't clobber stdout if `go list` actually returned something.
+			if len(stdout.String()) > 0 {
+				return stdout, nil
+			}
+			// try to extract package name from string
+			stderrStr := stderr.String()
+			var importPath string
+			colon := strings.Index(stderrStr, ":")
+			if colon > 0 && strings.HasPrefix(stderrStr, "go build ") {
+				importPath = stderrStr[len("go build "):colon]
+			}
+			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
+				importPath, strings.Trim(stderrStr, "\n"))
+			return bytes.NewBufferString(output), nil
+		}
+
+		// Export mode entails a build.
+		// If that build fails, errors appear on stderr
+		// (despite the -e flag) and the Export field is blank.
+		// Do not fail in that case.
+		// The same is true if an ad-hoc package given to go list doesn't exist.
+		// TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
+		// packages don't exist or a build fails.
+		if !usesExportData(cfg) && !containsGoFile(args) {
+			return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
+		}
+	}
+
+	// As of writing, go list -export prints some non-fatal compilation
+	// errors to stderr, even with -e set. We would prefer that it put
+	// them in the Package.Error JSON (see https://golang.org/issue/26319).
+ // In the meantime, there's nowhere good to put them, but they can + // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS + // is set. + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) + } + return stdout, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd, args ...string) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + var quotedArgs []string + for _, arg := range args { + quotedArgs = append(quotedArgs, strconv.Quote(arg)) + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " ")) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 000000000..a7de62299 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,293 @@ +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + "go/parser" + "go/token" + "path/filepath" + "strconv" + "strings" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. +// TODO(matloob): Handle unsupported cases, including the following: +// - determining the correct package to add given a new import path +func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() *goInfo) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + for _, pkg := range response.dr.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. + havePkgs[pkg.PkgPath] = pkg.ID + } + + // If no new imports are added, it is safe to avoid loading any needPkgs. + // Otherwise, it's hard to tell which package is actually being loaded + // (due to vendoring) and whether any modified package will show up + // in the transitive set of dependencies (because new imports are added, + // potentially modifying the transitive set of dependencies). + var overlayAddsImports bool + + for opath, contents := range cfg.Overlay { + base := filepath.Base(opath) + dir := filepath.Dir(opath) + var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant + var testVariantOf *Package // if opath is a test file, this is the package it is testing + var fileExists bool + isTestFile := strings.HasSuffix(opath, "_test.go") + pkgName, ok := extractPackageName(opath, contents) + if !ok { + // Don't bother adding a file that doesn't even have a parsable package statement + // to the overlay. + continue + } + nextPackage: + for _, p := range response.dr.Packages { + if pkgName != p.Name && p.ID != "command-line-arguments" { + continue + } + for _, f := range p.GoFiles { + if !sameFile(filepath.Dir(f), dir) { + continue + } + // Make sure to capture information on the package's test variant, if needed. 
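+				// (For example, an overlay file a_test.go in package a belongs to
+				// the in-package test variant with ID "a [a.test]" rather than to
+				// "a" itself; the ID forms are described in golist.go.)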
+				if isTestFile && !hasTestFiles(p) {
+					// TODO(matloob): Are there packages other than the 'production' variant
+					// of a package that this can match? This shouldn't match the test main package
+					// because the file is generated in another directory.
+					testVariantOf = p
+					continue nextPackage
+				}
+				if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
+					// If we've already seen the test variant,
+					// make sure to label which package it is a test variant of.
+					if hasTestFiles(pkg) {
+						testVariantOf = p
+						continue nextPackage
+					}
+					// If we have already seen the package of which this is a test variant.
+					if hasTestFiles(p) {
+						testVariantOf = pkg
+					}
+				}
+				pkg = p
+				if filepath.Base(f) == base {
+					fileExists = true
+				}
+			}
+		}
+		// The overlay could have included an entirely new package.
+		if pkg == nil {
+			// Try to find the module or gopath dir the file is contained in.
+			// Then for modules, add the module path to the beginning.
+			pkgPath, ok := getPkgPath(cfg, dir, rootDirs)
+			if !ok {
+				break
+			}
+			isXTest := strings.HasSuffix(pkgName, "_test")
+			if isXTest {
+				pkgPath += "_test"
+			}
+			id := pkgPath
+			if isTestFile && !isXTest {
+				id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
+			}
+			// Try to reclaim a package with the same id if it exists in the response.
+			for _, p := range response.dr.Packages {
+				if reclaimPackage(p, id, opath, contents) {
+					pkg = p
+					break
+				}
+			}
+			// Otherwise, create a new package
+			if pkg == nil {
+				pkg = &Package{PkgPath: pkgPath, ID: id, Name: pkgName, Imports: make(map[string]*Package)}
+				response.addPackage(pkg)
+				havePkgs[pkg.PkgPath] = id
+				// Add the production package's sources for a test variant.
+				if isTestFile && !isXTest && testVariantOf != nil {
+					pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
+					pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
+				}
+			}
+		}
+		if !fileExists {
+			pkg.GoFiles = append(pkg.GoFiles, opath)
+			// TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior
+			// if the file will be ignored due to its build tags.
+			pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath)
+			modifiedPkgsSet[pkg.ID] = true
+		}
+		imports, err := extractImports(opath, contents)
+		if err != nil {
+			// Let the parser or type checker report errors later.
+			continue
+		}
+		for _, imp := range imports {
+			_, found := pkg.Imports[imp]
+			if !found {
+				overlayAddsImports = true
+				// TODO(matloob): Handle cases when the following block isn't correct.
+				// These include imports of vendored packages, etc.
+				id, ok := havePkgs[imp]
+				if !ok {
+					id = imp
+				}
+				pkg.Imports[imp] = &Package{ID: id}
+				// Add dependencies to the non-test variant version of this package as well.
+				if testVariantOf != nil {
+					testVariantOf.Imports[imp] = &Package{ID: id}
+				}
+			}
+		}
+		continue
+	}
+
+	// toPkgPath tries to guess the package path given the id.
+	// This isn't always correct -- it's certainly wrong for
+	// vendored packages' paths.
+	toPkgPath := func(id string) string {
+		// TODO(matloob): Handle vendor paths.
+		i := strings.IndexByte(id, ' ')
+		if i >= 0 {
+			return id[:i]
+		}
+		return id
+	}
+
+	// Do another pass now that new packages have been created to determine the
+	// set of missing packages.
+ for _, pkg := range response.dr.Packages { + for _, imp := range pkg.Imports { + pkgPath := toPkgPath(imp.ID) + if _, ok := havePkgs[pkgPath]; !ok { + needPkgsSet[pkgPath] = true + } + } + } + + if overlayAddsImports { + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +func hasTestFiles(p *Package) bool { + for _, f := range p.GoFiles { + if strings.HasSuffix(f, "_test.go") { + return true + } + } + return false +} + +// determineRootDirs returns a mapping from directories code can be contained in to the +// corresponding import path prefixes of those directories. +// Its result is used to try to determine the import path for a package containing +// an overlay file. +func determineRootDirs(cfg *Config) map[string]string { + // Assume modules first: + out, err := invokeGo(cfg, "list", "-m", "-json", "all") + if err != nil { + return determineRootDirsGOPATH(cfg) + } + m := map[string]string{} + type jsonMod struct{ Path, Dir string } + for dec := json.NewDecoder(out); dec.More(); { + mod := new(jsonMod) + if err := dec.Decode(mod); err != nil { + return m // Give up and return an empty map. Package won't be found for overlay. + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + m[mod.Dir] = mod.Path + } + } + return m +} + +func determineRootDirsGOPATH(cfg *Config) map[string]string { + m := map[string]string{} + out, err := invokeGo(cfg, "env", "GOPATH") + if err != nil { + // Could not determine root dir mapping. Everything is best-effort, so just return an empty map. + // When we try to find the import path for a directory, there will be no root-dir match and + // we'll give up. + return m + } + for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) { + m[filepath.Join(p, "src")] = "" + } + return m +} + +func extractImports(filename string, contents []byte) ([]string, error) { + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? + if err != nil { + return nil, err + } + var res []string + for _, imp := range f.Imports { + quotedPath := imp.Path.Value + path, err := strconv.Unquote(quotedPath) + if err != nil { + return nil, err + } + res = append(res, path) + } + return res, nil +} + +// reclaimPackage attempts to reuse a package that failed to load in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if pkg.ID != id { + return false + } + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + pkgName, ok := extractPackageName(filename, contents) + if !ok { + return false + } + pkg.Name = pkgName + pkg.Errors = nil + return true +} + +func extractPackageName(filename string, contents []byte) (string, bool) { + // TODO(rstambler): Check the message of the actual error? 
+ // It differs between $GOPATH and module mode. + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? + if err != nil { + return "", false + } + return f.Name.Name, true +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 000000000..aff94a3fe --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportsFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportsFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return fmt.Sprintf("LoadMode(0)") + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 000000000..050cca43a --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1116 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "sync" + + "golang.org/x/tools/go/gcexportdata" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. +// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportsFile adds ExportsFile. + NeedExportsFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. 
Please directly specify the needed fields using the Need values.
+	LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
+
+	// Deprecated: LoadImports exists for historical compatibility
+	// and should not be used. Please directly specify the needed fields using the Need values.
+	LoadImports = LoadFiles | NeedImports
+
+	// Deprecated: LoadTypes exists for historical compatibility
+	// and should not be used. Please directly specify the needed fields using the Need values.
+	LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
+
+	// Deprecated: LoadSyntax exists for historical compatibility
+	// and should not be used. Please directly specify the needed fields using the Need values.
+	LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
+
+	// Deprecated: LoadAllSyntax exists for historical compatibility
+	// and should not be used. Please directly specify the needed fields using the Need values.
+	LoadAllSyntax = LoadSyntax | NeedDeps
+)
+
+// A Config specifies details about how packages should be loaded.
+// The zero value is a valid configuration.
+// Calls to Load do not modify this struct.
+type Config struct {
+	// Mode controls the level of information returned for each package.
+	Mode LoadMode
+
+	// Context specifies the context for the load operation.
+	// If the context is cancelled, the loader may stop early
+	// and return an ErrCancelled error.
+	// If Context is nil, the load cannot be cancelled.
+	Context context.Context
+
+	// Logf is the logger for the config.
+	// If the user provides a logger, debug logging is enabled.
+	// If the GOPACKAGESDEBUG environment variable is set to true,
+	// but the logger is nil, default to log.Printf.
+	Logf func(format string, args ...interface{})
+
+	// Dir is the directory in which to run the build system's query tool
+	// that provides information about the packages.
+	// If Dir is empty, the tool is run in the current directory.
+	Dir string
+
+	// Env is the environment to use when invoking the build system's query tool.
+	// If Env is nil, the current environment is used.
+	// As in os/exec's Cmd, only the last value in the slice for
+	// each environment key is used. To specify the setting of only
+	// a few variables, append to the current environment, as in:
+	//
+	//	opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
+	//
+	Env []string
+
+	// BuildFlags is a list of command-line flags to be passed through to
+	// the build system's query tool.
+	BuildFlags []string
+
+	// Fset provides source position information for syntax trees and types.
+	// If Fset is nil, Load will use a new fileset, but preserve Fset's value.
+	Fset *token.FileSet
+
+	// ParseFile is called to read and parse each file
+	// when preparing a package's type-checked syntax tree.
+	// It must be safe to call ParseFile simultaneously from multiple goroutines.
+	// If ParseFile is nil, the loader will use parser.ParseFile.
+	//
+	// ParseFile should parse the source from src and use filename only for
+	// recording position information.
+	//
+	// An application may supply a custom implementation of ParseFile
+	// to change the effective file contents or the behavior of the parser,
+	// or to modify the syntax tree. For example, selectively eliminating
+	// unwanted function bodies can significantly accelerate type checking.
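+	//
+	// A sketch of a custom ParseFile equivalent to the loader's default
+	// (shown for illustration only; cfg names a Config value):
+	//
+	//	cfg.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+	//		return parser.ParseFile(fset, filename, src, parser.AllErrors|parser.ParseComments)
+	//	}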
+ ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) + + // If Tests is set, the loader includes not just the packages + // matching a particular pattern but also any related test packages, + // including test-only variants of the package and the test executable. + // + // For example, when using the go command, loading "fmt" with Tests=true + // returns four packages, with IDs "fmt" (the standard package), + // "fmt [fmt.test]" (the package as compiled for the test), + // "fmt_test" (the test functions from source files in package fmt_test), + // and "fmt.test" (the test binary). + // + // In build systems with explicit names for tests, + // setting Tests may have no effect. + Tests bool + + // Overlay provides a mapping of absolute file paths to file contents. + // If the file with the given path already exists, the parser will use the + // alternative file contents provided by the map. + // + // Overlays provide incomplete support for when a given file doesn't + // already exist on disk. See the package doc above for more details. + Overlay map[string][]byte +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*driverResponse, error) + +// driverResponse contains the results for a driver query. +type driverResponse struct { + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + l.sizes = response.Sizes + return l.refine(response.Roots, response.Packages...) +} + +// defaultDriver is a driver that looks for an external driver binary, and if +// it does not find it falls back to the built in go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + return driver(cfg, patterns...) +} + +// A Package describes a loaded Go package. 
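+//
+// Packages are typically obtained from Load; a minimal illustrative use
+// (the "./..." pattern is only an example):
+//
+//	pkgs, err := Load(&Config{Mode: NeedName}, "./...")
+//	if err != nil {
+//		return err // the patterns themselves were invalid
+//	}
+//	PrintErrors(pkgs) // per-package errors are reported here, not by Load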
+type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // GoFiles lists the absolute file paths of the package's Go source files. + GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that were presented to the compiler. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. 
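+//
+// For example, a package might flatten to JSON roughly like this
+// (illustrative; empty fields are elided):
+//
+//	{"ID":"fmt","Name":"fmt","PkgPath":"fmt","Imports":{"errors":"errors"}}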
+type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. +func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. + // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. 
+		ld.Config.Logf = cfg.Logf
+	}
+	if ld.Config.Logf == nil {
+		// If the GOPACKAGESDEBUG environment variable is set to true,
+		// but the user has not provided a logger, default to log.Printf.
+		if debug {
+			ld.Config.Logf = log.Printf
+		} else {
+			ld.Config.Logf = func(format string, args ...interface{}) {}
+		}
+	}
+	if ld.Config.Mode == 0 {
+		ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.
+	}
+	if ld.Config.Env == nil {
+		ld.Config.Env = os.Environ()
+	}
+	if ld.Context == nil {
+		ld.Context = context.Background()
+	}
+	if ld.Dir == "" {
+		if dir, err := os.Getwd(); err == nil {
+			ld.Dir = dir
+		}
+	}
+
+	// Save the actually requested fields. We'll zero them out before returning packages to the user.
+	ld.requestedMode = ld.Mode
+	ld.Mode = impliedLoadMode(ld.Mode)
+
+	if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
+		if ld.Fset == nil {
+			ld.Fset = token.NewFileSet()
+		}
+
+		// ParseFile is required even in LoadTypes mode
+		// because we load source if export data is missing.
+		if ld.ParseFile == nil {
+			ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
+				const mode = parser.AllErrors | parser.ParseComments
+				return parser.ParseFile(fset, filename, src, mode)
+			}
+		}
+	}
+
+	return ld
+}
+
+// refine connects the supplied packages into a graph and then adds type
+// and syntax information as requested by the LoadMode.
+func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
+	rootMap := make(map[string]int, len(roots))
+	for i, root := range roots {
+		rootMap[root] = i
+	}
+	ld.pkgs = make(map[string]*loaderPackage)
+	// first pass, fixup and build the map and roots
+	var initial = make([]*loaderPackage, len(roots))
+	for _, pkg := range list {
+		rootIndex := -1
+		if i, found := rootMap[pkg.ID]; found {
+			rootIndex = i
+		}
+		lpkg := &loaderPackage{
+			Package:   pkg,
+			needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0,
+			needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 ||
+				len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+				pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
+		}
+		ld.pkgs[lpkg.ID] = lpkg
+		if rootIndex >= 0 {
+			initial[rootIndex] = lpkg
+			lpkg.initial = true
+		}
+	}
+	for i, root := range roots {
+		if initial[i] == nil {
+			return nil, fmt.Errorf("root package %v is missing", root)
+		}
+	}
+
+	// Materialize the import graph.
+
+	const (
+		white = 0 // new
+		grey  = 1 // in progress
+		black = 2 // complete
+	)
+
+	// visit traverses the import graph, depth-first,
+	// and materializes the graph as Packages.Imports.
+	//
+	// Valid imports are saved in the Packages.Import map.
+	// Invalid imports (cycles and missing nodes) are saved in the importErrors map.
+	// Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
+	//
+	// visit returns whether the package needs src or has a transitive
+	// dependency on a package that does. These are the only packages
+	// for which we load source code.
+ var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + var srcPkgs []*loaderPackage + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + // If NeedImports isn't set, the imports fields will all be zeroed out. + if ld.Mode&NeedImports != 0 { + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&NeedImports == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data and syntax if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, for extra de-Hyrum-ization. + if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportsFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. 
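+// Concurrent calls for the same package are safe: both block on the
+// package's loadOnce and observe a single underlying load.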
+func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. + var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // The Diamond test exercises this case. + if !lpkg.needtypes { + return + } + if !lpkg.needsrc { + ld.loadFromExportData(lpkg) + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. + var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. 
+ appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + ld.loadFromExportData(lpkg) + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&NeedTypes == 0 { + return + } + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in non-initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. + if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // Record accumulated errors. + illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. 
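+//
+// The pattern, as used by parseFile below:
+//
+//	ioLimit <- true                       // acquire one of the 20 slots
+//	src, err := ioutil.ReadFile(filename) // do the I/O
+//	<-ioLimit                             // release the slot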
+var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. +// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. +// +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + if ld.Config.Context.Err() != nil { + parsed[i] = nil + errors[i] = ld.Config.Context.Err() + continue + } + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +// +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData returns type information for the specified +// package, loading it from an export data file on the first request. +func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the Package.Pkg field and the + // types.Package it points to, for each Package in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. 
+ ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return tpkg, nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return nil, fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return nil, err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if viewLen != len(view) { + log.Fatalf("Unexpected package creation during export data loading") + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + + return tpkg, nil +} + +// impliedLoadMode returns loadMode with its dependencies. +func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 { + // If NeedTypesInfo, go/packages needs to do typechecking itself so it can + // associate type info with the AST. To do so, we need the export data + // for dependencies, which means we need to ask for the direct dependencies. + // NeedImports is used to ask for the direct dependencies. + loadMode |= NeedImports + } + + if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 { + // With NeedDeps we need to load at least direct dependencies. + // NeedImports is used to ask for the direct dependencies. 
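+		// For example, impliedLoadMode(NeedDeps) == NeedDeps|NeedImports.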
+		loadMode |= NeedImports
+	}
+
+	return loadMode
+}
+
+func usesExportData(cfg *Config) bool {
+	return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
+}
diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go
new file mode 100644
index 000000000..b13cb081f
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/packages/visit.go
@@ -0,0 +1,55 @@
+package packages
+
+import (
+	"fmt"
+	"os"
+	"sort"
+)
+
+// Visit visits all the packages in the import graph whose roots are
+// pkgs, calling the optional pre function the first time each package
+// is encountered (preorder), and the optional post function after a
+// package's dependencies have been visited (postorder).
+// The boolean result of pre(pkg) determines whether
+// the imports of package pkg are visited.
+func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
+	seen := make(map[*Package]bool)
+	var visit func(*Package)
+	visit = func(pkg *Package) {
+		if !seen[pkg] {
+			seen[pkg] = true
+
+			if pre == nil || pre(pkg) {
+				paths := make([]string, 0, len(pkg.Imports))
+				for path := range pkg.Imports {
+					paths = append(paths, path)
+				}
+				sort.Strings(paths) // Imports is a map, this makes visit stable
+				for _, path := range paths {
+					visit(pkg.Imports[path])
+				}
+			}
+
+			if post != nil {
+				post(pkg)
+			}
+		}
+	}
+	for _, pkg := range pkgs {
+		visit(pkg)
+	}
+}
+
+// PrintErrors prints to os.Stderr the accumulated errors of all
+// packages in the import graph rooted at pkgs, dependencies first.
+// PrintErrors returns the number of errors printed.
+func PrintErrors(pkgs []*Package) int {
+	var n int
+	Visit(pkgs, nil, func(pkg *Package) {
+		for _, err := range pkg.Errors {
+			fmt.Fprintln(os.Stderr, err)
+			n++
+		}
+	})
+	return n
+}
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
new file mode 100644
index 000000000..7219c8e9f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
@@ -0,0 +1,196 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package fastwalk provides a faster version of filepath.Walk for file system
+// scanning tools.
+package fastwalk
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sync"
+)
+
+// TraverseLink is used as a return value from WalkFuncs to indicate that the
+// symlink named in the call may be traversed.
+var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
+
+// SkipFiles is used as a return value from WalkFuncs to indicate that the
+// callback should not be called for any other files in the current directory.
+// Child directories will still be traversed.
+var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
+
+// Walk is a faster implementation of filepath.Walk.
+//
+// filepath.Walk's design necessarily calls os.Lstat on each file,
+// even if the caller needs less info.
+// Many tools need only the type of each file.
+// On some platforms, this information is provided directly by the readdir
+// system call, avoiding the need to stat each file individually.
+// fastwalk_unix.go contains a fork of the syscall routines.
+//
+// See golang.org/issue/16399
+//
+// Walk walks the file tree rooted at root, calling walkFn for
+// each file or directory in the tree, including root.
+// +// If fastWalk returns filepath.SkipDir, the directory is skipped. +// +// Unlike filepath.Walk: +// * file stat calls must be done by the user. +// The only provided metadata is the file type, which does not include +// any permission bits. +// * multiple goroutines stat the filesystem concurrently. The provided +// walkFn must be safe for concurrent use. +// * fastWalk can follow symlinks if walkFn returns the TraverseLink +// sentinel error. It is the walkFn's responsibility to prevent +// fastWalk from going into symlink cycles. +func Walk(root string, walkFn func(path string, typ os.FileMode) error) error { + // TODO(bradfitz): make numWorkers configurable? We used a + // minimum of 4 to give the kernel more info about multiple + // things we want, in hopes its I/O scheduling can take + // advantage of that. Hopefully most are in cache. Maybe 4 is + // even too low of a minimum. Profile more. + numWorkers := 4 + if n := runtime.NumCPU(); n > numWorkers { + numWorkers = n + } + + // Make sure to wait for all workers to finish, otherwise + // walkFn could still be called after returning. This Wait call + // runs after close(e.donec) below. + var wg sync.WaitGroup + defer wg.Wait() + + w := &walker{ + fn: walkFn, + enqueuec: make(chan walkItem, numWorkers), // buffered for performance + workc: make(chan walkItem, numWorkers), // buffered for performance + donec: make(chan struct{}), + + // buffered for correctness & not leaking goroutines: + resc: make(chan error, numWorkers), + } + defer close(w.donec) + + for i := 0; i < numWorkers; i++ { + wg.Add(1) + go w.doWork(&wg) + } + todo := []walkItem{{dir: root}} + out := 0 + for { + workc := w.workc + var workItem walkItem + if len(todo) == 0 { + workc = nil + } else { + workItem = todo[len(todo)-1] + } + select { + case workc <- workItem: + todo = todo[:len(todo)-1] + out++ + case it := <-w.enqueuec: + todo = append(todo, it) + case err := <-w.resc: + out-- + if err != nil { + return err + } + if out == 0 && len(todo) == 0 { + // It's safe to quit here, as long as the buffered + // enqueue channel isn't also readable, which might + // happen if the worker sends both another unit of + // work and its result before the other select was + // scheduled and both w.resc and w.enqueuec were + // readable. + select { + case it := <-w.enqueuec: + todo = append(todo, it) + default: + return nil + } + } + } + } +} + +// doWork reads directories as instructed (via workc) and runs the +// user's callback function. 
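+// Each worker loops until donec is closed: it takes a directory from workc,
+// walks it, and sends the result to resc for the scheduler loop in Walk.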
+func (w *walker) doWork(wg *sync.WaitGroup) { + defer wg.Done() + for { + select { + case <-w.donec: + return + case it := <-w.workc: + select { + case <-w.donec: + return + case w.resc <- w.walk(it.dir, !it.callbackDone): + } + } + } +} + +type walker struct { + fn func(path string, typ os.FileMode) error + + donec chan struct{} // closed on fastWalk's return + workc chan walkItem // to workers + enqueuec chan walkItem // from workers + resc chan error // from workers +} + +type walkItem struct { + dir string + callbackDone bool // callback already called; don't do it again +} + +func (w *walker) enqueue(it walkItem) { + select { + case w.enqueuec <- it: + case <-w.donec: + } +} + +func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { + joined := dirName + string(os.PathSeparator) + baseName + if typ == os.ModeDir { + w.enqueue(walkItem{dir: joined}) + return nil + } + + err := w.fn(joined, typ) + if typ == os.ModeSymlink { + if err == TraverseLink { + // Set callbackDone so we don't call it twice for both the + // symlink-as-symlink and the symlink-as-directory later: + w.enqueue(walkItem{dir: joined, callbackDone: true}) + return nil + } + if err == filepath.SkipDir { + // Permit SkipDir on symlinks too. + return nil + } + } + return err +} + +func (w *walker) walk(root string, runUserCallback bool) error { + if runUserCallback { + err := w.fn(root, os.ModeDir) + if err == filepath.SkipDir { + return nil + } + if err != nil { + return err + } + } + + return readDir(root, w.onDirEnt) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go new file mode 100644 index 000000000..ccffec5ad --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_fileno.go @@ -0,0 +1,13 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Fileno) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go new file mode 100644 index 000000000..ab7fbc0a9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_ino.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin +// +build !appengine + +package fastwalk + +import "syscall" + +func direntInode(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Ino) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go new file mode 100644 index 000000000..a3b26a7ba --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_bsd.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin freebsd openbsd netbsd + +package fastwalk + +import "syscall" + +func direntNamlen(dirent *syscall.Dirent) uint64 { + return uint64(dirent.Namlen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go new file mode 100644 index 000000000..e880d358b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_dirent_namlen_linux.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux +// +build !appengine + +package fastwalk + +import ( + "bytes" + "syscall" + "unsafe" +) + +func direntNamlen(dirent *syscall.Dirent) uint64 { + const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name)) + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + const nameBufLen = uint16(len(nameBuf)) + limit := dirent.Reclen - fixedHdr + if limit > nameBufLen { + limit = nameBufLen + } + nameLen := bytes.IndexByte(nameBuf[:limit], 0) + if nameLen < 0 { + panic("failed to find terminating 0 byte in dirent") + } + return uint64(nameLen) +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go new file mode 100644 index 000000000..a906b8759 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -0,0 +1,37 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd + +package fastwalk + +import ( + "io/ioutil" + "os" +) + +// readDir calls fn for each directory entry in dirName. +// It does not descend into directories or follow symlinks. +// If fn returns a non-nil error, readDir returns with that error +// immediately. +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fis, err := ioutil.ReadDir(dirName) + if err != nil { + return err + } + skipFiles := false + for _, fi := range fis { + if fi.Mode().IsRegular() && skipFiles { + continue + } + if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } + return nil +} diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go new file mode 100644 index 000000000..3369b1a0b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -0,0 +1,127 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd +// +build !appengine + +package fastwalk + +import ( + "fmt" + "os" + "syscall" + "unsafe" +) + +const blockSize = 8 << 10 + +// unknownFileMode is a sentinel (and bogus) os.FileMode +// value used to represent a syscall.DT_UNKNOWN Dirent.Type. 
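+// The combination of named pipe, socket, and device bits cannot describe a
+// real file, so readDir can safely treat it as "type unknown" and fall back
+// to os.Lstat.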
+const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice + +func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error { + fd, err := syscall.Open(dirName, 0, 0) + if err != nil { + return &os.PathError{Op: "open", Path: dirName, Err: err} + } + defer syscall.Close(fd) + + // The buffer must be at least a block long. + buf := make([]byte, blockSize) // stack-allocated; doesn't escape + bufp := 0 // starting read position in buf + nbuf := 0 // end valid data in buf + skipFiles := false + for { + if bufp >= nbuf { + bufp = 0 + nbuf, err = syscall.ReadDirent(fd, buf) + if err != nil { + return os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + return nil + } + } + consumed, name, typ := parseDirEnt(buf[bufp:nbuf]) + bufp += consumed + if name == "" || name == "." || name == ".." { + continue + } + // Fallback for filesystems (like old XFS) that don't + // support Dirent.Type and have DT_UNKNOWN (0) there + // instead. + if typ == unknownFileMode { + fi, err := os.Lstat(dirName + "/" + name) + if err != nil { + // It got deleted in the meantime. + if os.IsNotExist(err) { + continue + } + return err + } + typ = fi.Mode() & os.ModeType + } + if skipFiles && typ.IsRegular() { + continue + } + if err := fn(dirName, name, typ); err != nil { + if err == SkipFiles { + skipFiles = true + continue + } + return err + } + } +} + +func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { + // golang.org/issue/15653 + dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { + panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) + } + if len(buf) < int(dirent.Reclen) { + panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen)) + } + consumed = int(dirent.Reclen) + if direntInode(dirent) == 0 { // File absent in directory. + return + } + switch dirent.Type { + case syscall.DT_REG: + typ = 0 + case syscall.DT_DIR: + typ = os.ModeDir + case syscall.DT_LNK: + typ = os.ModeSymlink + case syscall.DT_BLK: + typ = os.ModeDevice + case syscall.DT_FIFO: + typ = os.ModeNamedPipe + case syscall.DT_SOCK: + typ = os.ModeSocket + case syscall.DT_UNKNOWN: + typ = unknownFileMode + default: + // Skip weird things. + // It's probably a DT_WHT (http://lwn.net/Articles/325369/) + // or something. Revisit if/when this package is moved outside + // of goimports. goimports only cares about regular files, + // symlinks, and directories. + return + } + + nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0])) + nameLen := direntNamlen(dirent) + + // Special cases for common things: + if nameLen == 1 && nameBuf[0] == '.' { + name = "." + } else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' { + name = ".." + } else { + name = string(nameBuf[:nameLen]) + } + return +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go new file mode 100644 index 000000000..9a61bdbf5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -0,0 +1,270 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gopathwalk is like filepath.Walk but specialized for finding Go +// packages, particularly in $GOPATH and $GOROOT. 
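+//
+// A minimal illustrative call (the callback body is an example):
+//
+//	gopathwalk.Walk(gopathwalk.SrcDirsRoots(&build.Default),
+//		func(root gopathwalk.Root, dir string) { log.Println(dir) },
+//		gopathwalk.Options{})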
+package gopathwalk + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/tools/internal/fastwalk" +) + +// Options controls the behavior of a Walk call. +type Options struct { + Debug bool // Enable debug logging + ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules. +} + +// RootType indicates the type of a Root. +type RootType int + +const ( + RootUnknown RootType = iota + RootGOROOT + RootGOPATH + RootCurrentModule + RootModuleCache + RootOther +) + +// A Root is a starting point for a Walk. +type Root struct { + Path string + Type RootType +} + +// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible. +func SrcDirsRoots(ctx *build.Context) []Root { + var roots []Root + roots = append(roots, Root{filepath.Join(ctx.GOROOT, "src"), RootGOROOT}) + for _, p := range filepath.SplitList(ctx.GOPATH) { + roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH}) + } + return roots +} + +// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// add will be called concurrently. +func Walk(roots []Root, add func(root Root, dir string), opts Options) { + WalkSkip(roots, add, func(Root, string) bool { return false }, opts) +} + +// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages. +// For each package found, add will be called (concurrently) with the absolute +// paths of the containing source directory and the package directory. +// For each directory that will be scanned, skip will be called (concurrently) +// with the absolute paths of the containing source directory and the directory. +// If skip returns false on a directory it will be processed. +// add will be called concurrently. +// skip will be called concurrently. +func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) { + for _, root := range roots { + walkDir(root, add, skip, opts) + } +} + +func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { + if _, err := os.Stat(root.Path); os.IsNotExist(err) { + if opts.Debug { + log.Printf("skipping nonexistent directory: %v", root.Path) + } + return + } + start := time.Now() + if opts.Debug { + log.Printf("gopathwalk: scanning %s", root.Path) + } + w := &walker{ + root: root, + add: add, + skip: skip, + opts: opts, + } + w.init() + if err := fastwalk.Walk(root.Path, w.walk); err != nil { + log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) + } + + if opts.Debug { + log.Printf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) + } +} + +// walker is the callback for fastwalk.Walk. +type walker struct { + root Root // The source directory to scan. + add func(Root, string) // The callback that will be invoked for every possible Go package dir. + skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true. + opts Options // Options passed to Walk by the user. + + ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. +} + +// init initializes the walker based on its Options. 
+func (w *walker) init() { + var ignoredPaths []string + if w.root.Type == RootModuleCache { + ignoredPaths = []string{"cache"} + } + if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH { + ignoredPaths = w.getIgnoredDirs(w.root.Path) + ignoredPaths = append(ignoredPaths, "v", "mod") + } + + for _, p := range ignoredPaths { + full := filepath.Join(w.root.Path, p) + if fi, err := os.Stat(full); err == nil { + w.ignoredDirs = append(w.ignoredDirs, fi) + if w.opts.Debug { + log.Printf("Directory added to ignore list: %s", full) + } + } else if w.opts.Debug { + log.Printf("Error statting ignored directory: %v", err) + } + } +} + +// getIgnoredDirs reads an optional config file at /.goimportsignore +// of relative directories to ignore when scanning for go files. +// The provided path is one of the $GOPATH entries with "src" appended. +func (w *walker) getIgnoredDirs(path string) []string { + file := filepath.Join(path, ".goimportsignore") + slurp, err := ioutil.ReadFile(file) + if w.opts.Debug { + if err != nil { + log.Print(err) + } else { + log.Printf("Read %s", file) + } + } + if err != nil { + return nil + } + + var ignoredDirs []string + bs := bufio.NewScanner(bytes.NewReader(slurp)) + for bs.Scan() { + line := strings.TrimSpace(bs.Text()) + if line == "" || strings.HasPrefix(line, "#") { + continue + } + ignoredDirs = append(ignoredDirs, line) + } + return ignoredDirs +} + +func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { + for _, ignoredDir := range w.ignoredDirs { + if os.SameFile(fi, ignoredDir) { + return true + } + } + if w.skip != nil { + // Check with the user specified callback. + return w.skip(w.root, dir) + } + return false +} + +func (w *walker) walk(path string, typ os.FileMode) error { + dir := filepath.Dir(path) + if typ.IsRegular() { + if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { + // Doesn't make sense to have regular files + // directly in your $GOPATH/src or $GOROOT/src. + return fastwalk.SkipFiles + } + if !strings.HasSuffix(path, ".go") { + return nil + } + + w.add(w.root, dir) + return fastwalk.SkipFiles + } + if typ == os.ModeDir { + base := filepath.Base(path) + if base == "" || base[0] == '.' || base[0] == '_' || + base == "testdata" || + (w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") || + (!w.opts.ModulesEnabled && base == "node_modules") { + return filepath.SkipDir + } + fi, err := os.Lstat(path) + if err == nil && w.shouldSkipDir(fi, path) { + return filepath.SkipDir + } + return nil + } + if typ == os.ModeSymlink { + base := filepath.Base(path) + if strings.HasPrefix(base, ".#") { + // Emacs noise. + return nil + } + fi, err := os.Lstat(path) + if err != nil { + // Just ignore it. + return nil + } + if w.shouldTraverse(dir, fi) { + return fastwalk.TraverseLink + } + } + return nil +} + +// shouldTraverse reports whether the symlink fi, found in dir, +// should be followed. It makes sure symlinks were never visited +// before to avoid symlink loops. +func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool { + path := filepath.Join(dir, fi.Name()) + target, err := filepath.EvalSymlinks(path) + if err != nil { + return false + } + ts, err := os.Stat(target) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return false + } + if !ts.IsDir() { + return false + } + if w.shouldSkipDir(ts, dir) { + return false + } + // Check for symlink loops by statting each directory component + // and seeing if any are the same file as ts. 
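+	// For example, for a symlink /a/b/link whose target resolves to /a,
+	// the climb visits /a/b and then /a, finds /a is the same file as the
+	// target, and refuses to traverse.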
+ for { + parent := filepath.Dir(path) + if parent == path { + // Made it to the root without seeing a cycle. + // Use this symlink. + return true + } + parentInfo, err := os.Stat(parent) + if err != nil { + return false + } + if os.SameFile(ts, parentInfo) { + // Cycle. Don't traverse. + return false + } + path = parent + } + +} diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/tools/internal/semver/semver.go new file mode 100644 index 000000000..4af7118e5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/semver/semver.go @@ -0,0 +1,388 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string + err string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. 
+func Prerelease(v string) string {
+	pv, ok := parse(v)
+	if !ok {
+		return ""
+	}
+	return pv.prerelease
+}
+
+// Build returns the build suffix of the semantic version v.
+// For example, Build("v2.1.0+meta") == "+meta".
+// If v is an invalid semantic version string, Build returns the empty string.
+func Build(v string) string {
+	pv, ok := parse(v)
+	if !ok {
+		return ""
+	}
+	return pv.build
+}
+
+// Compare returns an integer comparing two versions according to
+// semantic version precedence.
+// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
+//
+// An invalid semantic version string is considered less than a valid one.
+// All invalid semantic version strings compare equal to each other.
+func Compare(v, w string) int {
+	pv, ok1 := parse(v)
+	pw, ok2 := parse(w)
+	if !ok1 && !ok2 {
+		return 0
+	}
+	if !ok1 {
+		return -1
+	}
+	if !ok2 {
+		return +1
+	}
+	if c := compareInt(pv.major, pw.major); c != 0 {
+		return c
+	}
+	if c := compareInt(pv.minor, pw.minor); c != 0 {
+		return c
+	}
+	if c := compareInt(pv.patch, pw.patch); c != 0 {
+		return c
+	}
+	return comparePrerelease(pv.prerelease, pw.prerelease)
+}
+
+// Max canonicalizes its arguments and then returns the version string
+// that compares greater.
+func Max(v, w string) string {
+	v = Canonical(v)
+	w = Canonical(w)
+	if Compare(v, w) > 0 {
+		return v
+	}
+	return w
+}
+
+func parse(v string) (p parsed, ok bool) {
+	if v == "" || v[0] != 'v' {
+		p.err = "missing v prefix"
+		return
+	}
+	p.major, v, ok = parseInt(v[1:])
+	if !ok {
+		p.err = "bad major version"
+		return
+	}
+	if v == "" {
+		p.minor = "0"
+		p.patch = "0"
+		p.short = ".0.0"
+		return
+	}
+	if v[0] != '.' {
+		p.err = "bad minor prefix"
+		ok = false
+		return
+	}
+	p.minor, v, ok = parseInt(v[1:])
+	if !ok {
+		p.err = "bad minor version"
+		return
+	}
+	if v == "" {
+		p.patch = "0"
+		p.short = ".0"
+		return
+	}
+	if v[0] != '.' {
+		p.err = "bad patch prefix"
+		ok = false
+		return
+	}
+	p.patch, v, ok = parseInt(v[1:])
+	if !ok {
+		p.err = "bad patch version"
+		return
+	}
+	if len(v) > 0 && v[0] == '-' {
+		p.prerelease, v, ok = parsePrerelease(v)
+		if !ok {
+			p.err = "bad prerelease"
+			return
+		}
+	}
+	if len(v) > 0 && v[0] == '+' {
+		p.build, v, ok = parseBuild(v)
+		if !ok {
+			p.err = "bad build"
+			return
+		}
+	}
+	if v != "" {
+		p.err = "junk on end"
+		ok = false
+		return
+	}
+	ok = true
+	return
+}
+
+func parseInt(v string) (t, rest string, ok bool) {
+	if v == "" {
+		return
+	}
+	if v[0] < '0' || '9' < v[0] {
+		return
+	}
+	i := 1
+	for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+		i++
+	}
+	if v[0] == '0' && i != 1 {
+		return
+	}
+	return v[:i], v[i:], true
+}
+
+func parsePrerelease(v string) (t, rest string, ok bool) {
+	// "A pre-release version MAY be denoted by appending a hyphen and
+	// a series of dot separated identifiers immediately following the patch version.
+	// Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
+	// Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
+	if v == "" || v[0] != '-' {
+		return
+	}
+	i := 1
+	start := 1
+	for i < len(v) && v[i] != '+' {
+		if !isIdentChar(v[i]) && v[i] != '.' {
+			return
+		}
+		if v[i] == '.' {
+			if start == i || isBadNum(v[start:i]) {
+				return
+			}
+			start = i + 1
+		}
+		i++
+	}
+	if start == i || isBadNum(v[start:i]) {
+		return
+	}
+	return v[:i], v[i:], true
+}
+
+func parseBuild(v string) (t, rest string, ok bool) {
+	if v == "" || v[0] != '+' {
+		return
+	}
+	i := 1
+	start := 1
+	for i < len(v) {
+		if !isIdentChar(v[i]) && v[i] != '.' {
+			return
+		}
+		if v[i] == '.' {
+			if start == i {
+				return
+			}
+			start = i + 1
+		}
+		i++
+	}
+	if start == i {
+		return
+	}
+	return v[:i], v[i:], true
+}
+
+func isIdentChar(c byte) bool {
+	return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
+}
+
+func isBadNum(v string) bool {
+	i := 0
+	for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+		i++
+	}
+	return i == len(v) && i > 1 && v[0] == '0'
+}
+
+func isNum(v string) bool {
+	i := 0
+	for i < len(v) && '0' <= v[i] && v[i] <= '9' {
+		i++
+	}
+	return i == len(v)
+}
+
+func compareInt(x, y string) int {
+	if x == y {
+		return 0
+	}
+	if len(x) < len(y) {
+		return -1
+	}
+	if len(x) > len(y) {
+		return +1
+	}
+	if x < y {
+		return -1
+	} else {
+		return +1
+	}
+}
+
+func comparePrerelease(x, y string) int {
+	// "When major, minor, and patch are equal, a pre-release version has
+	// lower precedence than a normal version.
+	// Example: 1.0.0-alpha < 1.0.0.
+	// Precedence for two pre-release versions with the same major, minor,
+	// and patch version MUST be determined by comparing each dot separated
+	// identifier from left to right until a difference is found as follows:
+	// identifiers consisting of only digits are compared numerically and
+	// identifiers with letters or hyphens are compared lexically in ASCII
+	// sort order. Numeric identifiers always have lower precedence than
+	// non-numeric identifiers. A larger set of pre-release fields has a
+	// higher precedence than a smaller set, if all of the preceding
+	// identifiers are equal.
+	// Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
+	// 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
+	if x == y {
+		return 0
+	}
+	if x == "" {
+		return +1
+	}
+	if y == "" {
+		return -1
+	}
+	for x != "" && y != "" {
+		x = x[1:] // skip - or .
+		y = y[1:] // skip - or .
+		var dx, dy string
+		dx, x = nextIdent(x)
+		dy, y = nextIdent(y)
+		if dx != dy {
+			ix := isNum(dx)
+			iy := isNum(dy)
+			if ix != iy {
+				if ix {
+					return -1
+				} else {
+					return +1
+				}
+			}
+			if ix {
+				if len(dx) < len(dy) {
+					return -1
+				}
+				if len(dx) > len(dy) {
+					return +1
+				}
+			}
+			if dx < dy {
+				return -1
+			} else {
+				return +1
+			}
+		}
+	}
+	if x == "" {
+		return -1
+	} else {
+		return +1
+	}
+}
+
+func nextIdent(x string) (dx, rest string) {
+	i := 0
+	for i < len(x) && x[i] != '.' {
+		i++
+	}
+	return x[:i], x[i:]
+}
diff --git a/vendor/golang.org/x/tools/internal/span/parse.go b/vendor/golang.org/x/tools/internal/span/parse.go
new file mode 100644
index 000000000..b3f268a38
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/span/parse.go
@@ -0,0 +1,100 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package span
+
+import (
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// Parse returns the location represented by the input.
+// All inputs are valid locations, as they can always be a pure filename.
+// The returned span will be normalized, and thus if printed may produce a
+// different string.
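+// For example, "main.go:10:4" parses to the point at line 10, column 4 of
+// main.go, and "main.go:10:4-12:1" parses to the span from that point to
+// line 12, column 1.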
+func Parse(input string) Span {
+	// :0:0#0-0:0#0
+	valid := input
+	var hold, offset int
+	hadCol := false
+	suf := rstripSuffix(input)
+	if suf.sep == "#" {
+		offset = suf.num
+		suf = rstripSuffix(suf.remains)
+	}
+	if suf.sep == ":" {
+		valid = suf.remains
+		hold = suf.num
+		hadCol = true
+		suf = rstripSuffix(suf.remains)
+	}
+	switch {
+	case suf.sep == ":":
+		return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), Point{})
+	case suf.sep == "-":
+		// we have a span, fall out of the case to continue
+	default:
+		// separator not valid, rewind to either the : or the start
+		return New(NewURI(valid), NewPoint(hold, 0, offset), Point{})
+	}
+	// only the span form can get here
+	// at this point we still don't know what the numbers we have mean
+	// if we have not yet seen a : then we might have either a line or a column depending
+	// on whether start has a column or not
+	// we build an end point and will fix it later if needed
+	end := NewPoint(suf.num, hold, offset)
+	hold, offset = 0, 0
+	suf = rstripSuffix(suf.remains)
+	if suf.sep == "#" {
+		offset = suf.num
+		suf = rstripSuffix(suf.remains)
+	}
+	if suf.sep != ":" {
+		// turns out we don't have a span after all, rewind
+		return New(NewURI(valid), end, Point{})
+	}
+	valid = suf.remains
+	hold = suf.num
+	suf = rstripSuffix(suf.remains)
+	if suf.sep != ":" {
+		// line#offset only
+		return New(NewURI(valid), NewPoint(hold, 0, offset), end)
+	}
+	// we have a column, so if end only had one number, it is also the column
+	if !hadCol {
+		end = NewPoint(suf.num, end.v.Line, end.v.Offset)
+	}
+	return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), end)
+}
+
+type suffix struct {
+	remains string
+	sep     string
+	num     int
+}
+
+func rstripSuffix(input string) suffix {
+	if len(input) == 0 {
+		return suffix{"", "", -1}
+	}
+	remains := input
+	num := -1
+	// first see if we have a number at the end
+	last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' })
+	if last >= 0 && last < len(remains)-1 {
+		number, err := strconv.ParseInt(remains[last+1:], 10, 64)
+		if err == nil {
+			num = int(number)
+			remains = remains[:last+1]
+		}
+	}
+	// now see if we have a trailing separator
+	r, w := utf8.DecodeLastRuneInString(remains)
+	if r != ':' && r != '#' {
+		return suffix{input, "", -1}
+	}
+	remains = remains[:len(remains)-w]
+	return suffix{remains, string(r), num}
+}
diff --git a/vendor/golang.org/x/tools/internal/span/span.go b/vendor/golang.org/x/tools/internal/span/span.go
new file mode 100644
index 000000000..4d2ad0986
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/span/span.go
@@ -0,0 +1,285 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package span contains support for representing positions and ranges in
+// text files.
+package span
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+)
+
+// Span represents a source code range in standardized form.
+type Span struct {
+	v span
+}
+
+// Point represents a single point within a file.
+// In general this should only be used as part of a Span, as on its own it
+// does not carry enough information.
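+// A Point may carry a line:column position, a byte offset, or both; the
+// HasPosition and HasOffset methods report which parts are set.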
+type Point struct {
+	v point
+}
+
+type span struct {
+	URI   URI   `json:"uri"`
+	Start point `json:"start"`
+	End   point `json:"end"`
+}
+
+type point struct {
+	Line   int `json:"line"`
+	Column int `json:"column"`
+	Offset int `json:"offset"`
+}
+
+// Invalid is a span that reports false from IsValid
+var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}}
+
+var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}}
+
+// Converter is the interface to an object that can convert between line:column
+// and offset forms for a single file.
+type Converter interface {
+	//ToPosition converts from an offset to a line:column pair.
+	ToPosition(offset int) (int, int, error)
+	//ToOffset converts from a line:column pair to an offset.
+	ToOffset(line, col int) (int, error)
+}
+
+func New(uri URI, start Point, end Point) Span {
+	s := Span{v: span{URI: uri, Start: start.v, End: end.v}}
+	s.v.clean()
+	return s
+}
+
+func NewPoint(line, col, offset int) Point {
+	p := Point{v: point{Line: line, Column: col, Offset: offset}}
+	p.v.clean()
+	return p
+}
+
+func Compare(a, b Span) int {
+	if r := CompareURI(a.URI(), b.URI()); r != 0 {
+		return r
+	}
+	if r := comparePoint(a.v.Start, b.v.Start); r != 0 {
+		return r
+	}
+	return comparePoint(a.v.End, b.v.End)
+}
+
+func ComparePoint(a, b Point) int {
+	return comparePoint(a.v, b.v)
+}
+
+func comparePoint(a, b point) int {
+	if !a.hasPosition() {
+		if a.Offset < b.Offset {
+			return -1
+		}
+		if a.Offset > b.Offset {
+			return 1
+		}
+		return 0
+	}
+	if a.Line < b.Line {
+		return -1
+	}
+	if a.Line > b.Line {
+		return 1
+	}
+	if a.Column < b.Column {
+		return -1
+	}
+	if a.Column > b.Column {
+		return 1
+	}
+	return 0
+}
+
+func (s Span) HasPosition() bool             { return s.v.Start.hasPosition() }
+func (s Span) HasOffset() bool               { return s.v.Start.hasOffset() }
+func (s Span) IsValid() bool                 { return s.v.Start.isValid() }
+func (s Span) IsPoint() bool                 { return s.v.Start == s.v.End }
+func (s Span) URI() URI                      { return s.v.URI }
+func (s Span) Start() Point                  { return Point{s.v.Start} }
+func (s Span) End() Point                    { return Point{s.v.End} }
+func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) }
+func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) }
+
+func (p Point) HasPosition() bool             { return p.v.hasPosition() }
+func (p Point) HasOffset() bool               { return p.v.hasOffset() }
+func (p Point) IsValid() bool                 { return p.v.isValid() }
+func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) }
+func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) }
+func (p Point) Line() int {
+	if !p.v.hasPosition() {
+		panic(fmt.Errorf("position not set in %v", p.v))
+	}
+	return p.v.Line
+}
+func (p Point) Column() int {
+	if !p.v.hasPosition() {
+		panic(fmt.Errorf("position not set in %v", p.v))
+	}
+	return p.v.Column
+}
+func (p Point) Offset() int {
+	if !p.v.hasOffset() {
+		panic(fmt.Errorf("offset not set in %v", p.v))
+	}
+	return p.v.Offset
+}
+
+func (p point) hasPosition() bool { return p.Line > 0 }
+func (p point) hasOffset() bool   { return p.Offset >= 0 }
+func (p point) isValid() bool     { return p.hasPosition() || p.hasOffset() }
+func (p point) isZero() bool {
+	return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0)
+}
+
+func (s *span) clean() {
+	//this presumes the points are already clean
+	if !s.End.isValid() || (s.End == point{}) {
+		s.End = s.Start
+	}
+}
+
+func (p *point) clean() {
+	if p.Line < 0 {
+		p.Line = 0
+	}
+	if p.Column <= 0 {
+		if p.Line > 0 {
+			p.Column = 1
+		} else {
+			p.Column = 0
+		}
+	}
+	if p.Offset == 0 && (p.Line > 1 || p.Column > 1) {
+		p.Offset = -1
+	}
+}
+
+// Format implements fmt.Formatter to print the Location in a standard form.
+// The format produced is one that can be read back in using Parse.
+func (s Span) Format(f fmt.State, c rune) {
+	fullForm := f.Flag('+')
+	preferOffset := f.Flag('#')
+	// we should always have a uri, simplify if it is file format
+	//TODO: make sure the end of the uri is unambiguous
+	uri := string(s.v.URI)
+	if c == 'f' {
+		uri = path.Base(uri)
+	} else if !fullForm {
+		uri = s.v.URI.Filename()
+	}
+	fmt.Fprint(f, uri)
+	if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) {
+		return
+	}
+	// see which bits of start to write
+	printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition())
+	printLine := s.HasPosition() && (fullForm || !printOffset)
+	printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1))
+	fmt.Fprint(f, ":")
+	if printLine {
+		fmt.Fprintf(f, "%d", s.v.Start.Line)
+	}
+	if printColumn {
+		fmt.Fprintf(f, ":%d", s.v.Start.Column)
+	}
+	if printOffset {
+		fmt.Fprintf(f, "#%d", s.v.Start.Offset)
+	}
+	// start is written, do we need end?
+	if s.IsPoint() {
+		return
+	}
+	// we don't print the line if it did not change
+	printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line)
+	fmt.Fprint(f, "-")
+	if printLine {
+		fmt.Fprintf(f, "%d", s.v.End.Line)
+	}
+	if printColumn {
+		if printLine {
+			fmt.Fprint(f, ":")
+		}
+		fmt.Fprintf(f, "%d", s.v.End.Column)
+	}
+	if printOffset {
+		fmt.Fprintf(f, "#%d", s.v.End.Offset)
+	}
+}
+
+func (s Span) WithPosition(c Converter) (Span, error) {
+	if err := s.update(c, true, false); err != nil {
+		return Span{}, err
+	}
+	return s, nil
+}
+
+func (s Span) WithOffset(c Converter) (Span, error) {
+	if err := s.update(c, false, true); err != nil {
+		return Span{}, err
+	}
+	return s, nil
+}
+
+func (s Span) WithAll(c Converter) (Span, error) {
+	if err := s.update(c, true, true); err != nil {
+		return Span{}, err
+	}
+	return s, nil
+}
+
+func (s *Span) update(c Converter, withPos, withOffset bool) error {
+	if !s.IsValid() {
+		return fmt.Errorf("cannot add information to an invalid span")
+	}
+	if withPos && !s.HasPosition() {
+		if err := s.v.Start.updatePosition(c); err != nil {
+			return err
+		}
+		if s.v.End.Offset == s.v.Start.Offset {
+			s.v.End = s.v.Start
+		} else if err := s.v.End.updatePosition(c); err != nil {
+			return err
+		}
+	}
+	if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) {
+		if err := s.v.Start.updateOffset(c); err != nil {
+			return err
+		}
+		if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column {
+			s.v.End.Offset = s.v.Start.Offset
+		} else if err := s.v.End.updateOffset(c); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *point) updatePosition(c Converter) error {
+	line, col, err := c.ToPosition(p.Offset)
+	if err != nil {
+		return err
+	}
+	p.Line = line
+	p.Column = col
+	return nil
+}
+
+func (p *point) updateOffset(c Converter) error {
+	offset, err := c.ToOffset(p.Line, p.Column)
+	if err != nil {
+		return err
+	}
+	p.Offset = offset
+	return nil
+}
diff --git a/vendor/golang.org/x/tools/internal/span/token.go b/vendor/golang.org/x/tools/internal/span/token.go
new file mode 100644
index 000000000..01b5ed2d0
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/span/token.go
@@ -0,0 +1,161 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "go/token" +) + +// Range represents a source code range in token.Pos form. +// It also carries the FileSet that produced the positions, so that it is +// self contained. +type Range struct { + FileSet *token.FileSet + Start token.Pos + End token.Pos +} + +// TokenConverter is a Converter backed by a token file set and file. +// It uses the file set methods to work out the conversions, which +// makes it fast and does not require the file contents. +type TokenConverter struct { + fset *token.FileSet + file *token.File +} + +// NewRange creates a new Range from a FileSet and two positions. +// To represent a point pass a 0 as the end pos. +func NewRange(fset *token.FileSet, start, end token.Pos) Range { + return Range{ + FileSet: fset, + Start: start, + End: end, + } +} + +// NewTokenConverter returns an implementation of Converter backed by a +// token.File. +func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter { + return &TokenConverter{fset: fset, file: f} +} + +// NewContentConverter returns an implementation of Converter for the +// given file content. +func NewContentConverter(filename string, content []byte) *TokenConverter { + fset := token.NewFileSet() + f := fset.AddFile(filename, -1, len(content)) + f.SetLinesForContent(content) + return &TokenConverter{fset: fset, file: f} +} + +// IsPoint returns true if the range represents a single point. +func (r Range) IsPoint() bool { + return r.Start == r.End +} + +// Span converts a Range to a Span that represents the Range. +// It will fill in all the members of the Span, calculating the line and column +// information. +func (r Range) Span() (Span, error) { + f := r.FileSet.File(r.Start) + if f == nil { + return Span{}, fmt.Errorf("file not found in FileSet") + } + s := Span{} + var err error + s.v.Start.Offset, err = offset(f, r.Start) + if err != nil { + return Span{}, err + } + if r.End.IsValid() { + s.v.End.Offset, err = offset(f, r.End) + if err != nil { + return Span{}, err + } + } + // In the presence of line directives, a single File can have sections from + // multiple file names. + filename := f.Position(r.Start).Filename + if r.End.IsValid() { + if endFilename := f.Position(r.End).Filename; filename != endFilename { + return Span{}, fmt.Errorf("span begins in file %q but ends in %q", filename, endFilename) + } + } + s.v.URI = FileURI(filename) + + s.v.Start.clean() + s.v.End.clean() + s.v.clean() + converter := NewTokenConverter(r.FileSet, f) + return s.WithPosition(converter) +} + +// offset is a copy of the Offset function in go/token, but with the adjustment +// that it does not panic on invalid positions. +func offset(f *token.File, pos token.Pos) (int, error) { + if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() { + return 0, fmt.Errorf("invalid pos") + } + return int(pos) - f.Base(), nil +} + +// Range converts a Span to a Range that represents the Span for the supplied +// File. +func (s Span) Range(converter *TokenConverter) (Range, error) { + s, err := s.WithOffset(converter) + if err != nil { + return Range{}, err + } + // go/token will panic if the offset is larger than the file's size, + // so check here to avoid panicking. 
+ if s.Start().Offset() > converter.file.Size() { + return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size()) + } + if s.End().Offset() > converter.file.Size() { + return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size()) + } + return Range{ + FileSet: converter.fset, + Start: converter.file.Pos(s.Start().Offset()), + End: converter.file.Pos(s.End().Offset()), + }, nil +} + +func (l *TokenConverter) ToPosition(offset int) (int, int, error) { + if offset > l.file.Size() { + return 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, l.file.Size()) + } + pos := l.file.Pos(offset) + p := l.fset.Position(pos) + if offset == l.file.Size() { + return p.Line + 1, 1, nil + } + return p.Line, p.Column, nil +} + +func (l *TokenConverter) ToOffset(line, col int) (int, error) { + if line < 0 { + return -1, fmt.Errorf("line is not valid") + } + lineMax := l.file.LineCount() + 1 + if line > lineMax { + return -1, fmt.Errorf("line is beyond end of file %v", lineMax) + } else if line == lineMax { + if col > 1 { + return -1, fmt.Errorf("column is beyond end of file") + } + // at the end of the file, allowing for a trailing eol + return l.file.Size(), nil + } + pos := lineStart(l.file, line) + if !pos.IsValid() { + return -1, fmt.Errorf("line is not in file") + } + // we assume that column is in bytes here, and that the first byte of a + // line is at column 1 + pos += token.Pos(col - 1) + return offset(l.file, pos) +} diff --git a/vendor/golang.org/x/tools/internal/span/token111.go b/vendor/golang.org/x/tools/internal/span/token111.go new file mode 100644 index 000000000..bf7a5406b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/token111.go @@ -0,0 +1,39 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.12 + +package span + +import ( + "go/token" +) + +// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go +// versions <= 1.11, we borrow logic from the analysisutil package. +// TODO(rstambler): Delete this file when we no longer support Go 1.11. +func lineStart(f *token.File, line int) token.Pos { + // Use binary search to find the start offset of this line. + + min := 0 // inclusive + max := f.Size() // exclusive + for { + offset := (min + max) / 2 + pos := f.Pos(offset) + posn := f.Position(pos) + if posn.Line == line { + return pos - (token.Pos(posn.Column) - 1) + } + + if min+1 >= max { + return token.NoPos + } + + if posn.Line < line { + min = offset + } else { + max = offset + } + } +} diff --git a/vendor/golang.org/x/tools/internal/span/token112.go b/vendor/golang.org/x/tools/internal/span/token112.go new file mode 100644 index 000000000..017aec9c1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/token112.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.12 + +package span + +import ( + "go/token" +) + +// TODO(rstambler): Delete this file when we no longer support Go 1.11. 
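+// lineStart is the Go 1.12+ version, which can delegate directly to the
+// standard library's (*token.File).LineStart.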
+func lineStart(f *token.File, line int) token.Pos { + return f.LineStart(line) +} diff --git a/vendor/golang.org/x/tools/internal/span/uri.go b/vendor/golang.org/x/tools/internal/span/uri.go new file mode 100644 index 000000000..e05a9e6ef --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/uri.go @@ -0,0 +1,152 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "net/url" + "os" + "path" + "path/filepath" + "runtime" + "strings" + "unicode" +) + +const fileScheme = "file" + +// URI represents the full URI for a file. +type URI string + +// Filename returns the file path for the given URI. +// It is an error to call this on a URI that is not a valid filename. +func (uri URI) Filename() string { + filename, err := filename(uri) + if err != nil { + panic(err) + } + return filepath.FromSlash(filename) +} + +func filename(uri URI) (string, error) { + if uri == "" { + return "", nil + } + u, err := url.ParseRequestURI(string(uri)) + if err != nil { + return "", err + } + if u.Scheme != fileScheme { + return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri) + } + if isWindowsDriveURI(u.Path) { + u.Path = u.Path[1:] + } + return u.Path, nil +} + +// NewURI returns a span URI for the string. +// It will attempt to detect if the string is a file path or uri. +func NewURI(s string) URI { + if u, err := url.PathUnescape(s); err == nil { + s = u + } + if strings.HasPrefix(s, fileScheme+"://") { + return URI(s) + } + return FileURI(s) +} + +func CompareURI(a, b URI) int { + if equalURI(a, b) { + return 0 + } + if a < b { + return -1 + } + return 1 +} + +func equalURI(a, b URI) bool { + if a == b { + return true + } + // If we have the same URI basename, we may still have the same file URIs. + if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) { + return false + } + fa, err := filename(a) + if err != nil { + return false + } + fb, err := filename(b) + if err != nil { + return false + } + // Stat the files to check if they are equal. + infoa, err := os.Stat(filepath.FromSlash(fa)) + if err != nil { + return false + } + infob, err := os.Stat(filepath.FromSlash(fb)) + if err != nil { + return false + } + return os.SameFile(infoa, infob) +} + +// FileURI returns a span URI for the supplied file path. +// It will always have the file scheme. +func FileURI(path string) URI { + if path == "" { + return "" + } + // Handle standard library paths that contain the literal "$GOROOT". + // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT. + const prefix = "$GOROOT" + if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) { + suffix := path[len(prefix):] + path = runtime.GOROOT() + suffix + } + if !isWindowsDrivePath(path) { + if abs, err := filepath.Abs(path); err == nil { + path = abs + } + } + // Check the file path again, in case it became absolute. + if isWindowsDrivePath(path) { + path = "/" + path + } + path = filepath.ToSlash(path) + u := url.URL{ + Scheme: fileScheme, + Path: path, + } + uri := u.String() + if unescaped, err := url.PathUnescape(uri); err == nil { + uri = unescaped + } + return URI(uri) +} + +// isWindowsDrivePath returns true if the file path is of the form used by +// Windows. We check if the path begins with a drive letter, followed by a ":". 
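+// For example, "C:/foo" and "c:\bar" are drive paths, while "/foo/bar" is not.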
+func isWindowsDrivePath(path string) bool { + if len(path) < 4 { + return false + } + return unicode.IsLetter(rune(path[0])) && path[1] == ':' +} + +// isWindowsDriveURI returns true if the file URI is of the format used by +// Windows URIs. The url.Parse package does not specially handle Windows paths +// (see https://golang.org/issue/6027). We check if the URI path has +// a drive prefix (e.g. "/C:"). If so, we trim the leading "/". +func isWindowsDriveURI(uri string) bool { + if len(uri) < 4 { + return false + } + return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' +} diff --git a/vendor/golang.org/x/tools/internal/span/utf16.go b/vendor/golang.org/x/tools/internal/span/utf16.go new file mode 100644 index 000000000..561b3fa50 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/span/utf16.go @@ -0,0 +1,94 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package span + +import ( + "fmt" + "unicode/utf16" + "unicode/utf8" +) + +// ToUTF16Column calculates the utf16 column expressed by the point given the +// supplied file contents. +// This is used to convert from the native (always in bytes) column +// representation and the utf16 counts used by some editors. +func ToUTF16Column(p Point, content []byte) (int, error) { + if content == nil { + return -1, fmt.Errorf("ToUTF16Column: missing content") + } + if !p.HasPosition() { + return -1, fmt.Errorf("ToUTF16Column: point is missing position") + } + if !p.HasOffset() { + return -1, fmt.Errorf("ToUTF16Column: point is missing offset") + } + offset := p.Offset() // 0-based + colZero := p.Column() - 1 // 0-based + if colZero == 0 { + // 0-based column 0, so it must be chr 1 + return 1, nil + } else if colZero < 0 { + return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero) + } + // work out the offset at the start of the line using the column + lineOffset := offset - colZero + if lineOffset < 0 || offset > len(content) { + return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content)) + } + // Use the offset to pick out the line start. + // This cannot panic: offset > len(content) and lineOffset < offset. + start := content[lineOffset:] + + // Now, truncate down to the supplied column. + start = start[:colZero] + + // and count the number of utf16 characters + // in theory we could do this by hand more efficiently... + return len(utf16.Encode([]rune(string(start)))) + 1, nil +} + +// FromUTF16Column advances the point by the utf16 character offset given the +// supplied line contents. +// This is used to convert from the utf16 counts used by some editors to the +// native (always in bytes) column representation. 
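+// Note that code points at or above U+10000 occupy two UTF-16 code units
+// (a surrogate pair), so each consumes two steps of chr.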
+func FromUTF16Column(p Point, chr int, content []byte) (Point, error) { + if !p.HasOffset() { + return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset") + } + // if chr is 1 then no adjustment needed + if chr <= 1 { + return p, nil + } + if p.Offset() >= len(content) { + return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content)) + } + remains := content[p.Offset():] + // scan forward the specified number of characters + for count := 1; count < chr; count++ { + if len(remains) <= 0 { + return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content") + } + r, w := utf8.DecodeRune(remains) + if r == '\n' { + // Per the LSP spec: + // + // > If the character value is greater than the line length it + // > defaults back to the line length. + break + } + remains = remains[w:] + if r >= 0x10000 { + // a two point rune + count++ + // if we finished in a two point rune, do not advance past the first + if count >= chr { + break + } + } + p.v.Column += w + p.v.Offset += w + } + return p, nil +} diff --git a/vendor/gopkg.in/imdario/mergo.v0/.gitignore b/vendor/gopkg.in/imdario/mergo.v0/.gitignore new file mode 100644 index 000000000..529c3412b --- /dev/null +++ b/vendor/gopkg.in/imdario/mergo.v0/.gitignore @@ -0,0 +1,33 @@ +#### joe made this: http://goel.io/joe + +#### go #### +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +#### vim #### +# Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-v][a-z] +[._]sw[a-p] + +# Session +Session.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags diff --git a/vendor/gopkg.in/imdario/mergo.v0/.travis.yml b/vendor/gopkg.in/imdario/mergo.v0/.travis.yml new file mode 100644 index 000000000..b13a50ed1 --- /dev/null +++ b/vendor/gopkg.in/imdario/mergo.v0/.travis.yml @@ -0,0 +1,7 @@ +language: go +install: + - go get -t + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN diff --git a/vendor/gopkg.in/imdario/mergo.v0/CODE_OF_CONDUCT.md b/vendor/gopkg.in/imdario/mergo.v0/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..469b44907 --- /dev/null +++ b/vendor/gopkg.in/imdario/mergo.v0/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/gopkg.in/imdario/mergo.v0/LICENSE b/vendor/gopkg.in/imdario/mergo.v0/LICENSE new file mode 100644 index 000000000..686680298 --- /dev/null +++ b/vendor/gopkg.in/imdario/mergo.v0/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/imdario/mergo.v0/README.md b/vendor/gopkg.in/imdario/mergo.v0/README.md
new file mode 100644
index 000000000..02fc81e06
--- /dev/null
+++ b/vendor/gopkg.in/imdario/mergo.v0/README.md
@@ -0,0 +1,238 @@
+# Mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+[![GoDoc][3]][4]
+[![GoCard][5]][6]
+[![Build Status][1]][2]
+[![Coverage Status][7]][8]
+[![Sourcegraph][9]][10]
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield)
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://goreportcard.com/badge/imdario/mergo
+[6]: https://goreportcard.com/report/github.com/imdario/mergo
+[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[8]: https://coveralls.io/github/imdario/mergo?branch=master
+[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
+
+### Latest release
+
+[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7).
+
+### Important note
+
+Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
+
+If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+
+### Donations
+
+If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
+
+Buy Me a Coffee at ko-fi.com
+[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
+[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
+Donate using Liberapay
+
+### Mergo in the wild
+
+- [moby/moby](https://github.com/moby/moby)
+- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+- [vmware/dispatch](https://github.com/vmware/dispatch)
+- [Shopify/themekit](https://github.com/Shopify/themekit)
+- [imdario/zas](https://github.com/imdario/zas)
+- [matcornic/hermes](https://github.com/matcornic/hermes)
+- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
+- [kataras/iris](https://github.com/kataras/iris)
+- [michaelsauter/crane](https://github.com/michaelsauter/crane)
+- [go-task/task](https://github.com/go-task/task)
+- [sensu/uchiwa](https://github.com/sensu/uchiwa)
+- [ory/hydra](https://github.com/ory/hydra)
+- [sisatech/vcli](https://github.com/sisatech/vcli)
+- [dairycart/dairycart](https://github.com/dairycart/dairycart)
+- [projectcalico/felix](https://github.com/projectcalico/felix)
+- [resin-os/balena](https://github.com/resin-os/balena)
+- [go-kivik/kivik](https://github.com/go-kivik/kivik)
+- [Telefonica/govice](https://github.com/Telefonica/govice)
+- [supergiant/supergiant](https://github.com/supergiant/supergiant)
+- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+
+## Installation
+
+    go get github.com/imdario/mergo
+
+    // use in your .go code
+    import (
+        "github.com/imdario/mergo"
+    )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it will recursively merge any exported ones. It won't merge empty struct values, as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Maps are merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+	// ...
+}
+```
+
+Also, you can merge while overwriting values by using the option `WithOverride`.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+	// ...
+}
+```
+
+Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from a struct to a map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+	// ...
+}
+```
+
+Warning: if you map a struct to a map, the mapping won't be done recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
+
+More information and examples in the [godoc documentation](http://godoc.org/github.com/imdario/mergo).
+
+### Nice example
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/imdario/mergo"
+)
+
+type Foo struct {
+	A string
+	B int64
+}
+
+func main() {
+	src := Foo{
+		A: "one",
+		B: 2,
+	}
+	dest := Foo{
+		A: "two",
+	}
+	mergo.Merge(&dest, src)
+	fmt.Println(dest)
+	// Will print
+	// {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please execute:
+
+    go get gopkg.in/yaml.v2
+
+### Transformers
+
+Transformers allow you to merge specific types differently than the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; it isn't itself an empty value, but IsZero can return true because its fields hold zero values. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/imdario/mergo"
+	"reflect"
+	"time"
+)
+
+type timeTransformer struct {
+}
+
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+	if typ == reflect.TypeOf(time.Time{}) {
+		return func(dst, src reflect.Value) error {
+			if dst.CanSet() {
+				isZero := dst.MethodByName("IsZero")
+				result := isZero.Call([]reflect.Value{})
+				if result[0].Bool() {
+					dst.Set(src)
+				}
+			}
+			return nil
+		}
+	}
+	return nil
+}
+
+type Snapshot struct {
+	Time time.Time
+	// ...
+}
+
+func main() {
+	src := Snapshot{time.Now()}
+	dest := Snapshot{}
+	mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+	fmt.Println(dest)
+	// Will print
+	// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+
+## Contact me
+
+If I can help you, or if you have an idea or are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+ +## Top Contributors + +[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) +[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) +[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) +[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) +[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) +[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) +[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) +[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) + + +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). + + +[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large) diff --git a/vendor/gopkg.in/imdario/mergo.v0/doc.go b/vendor/gopkg.in/imdario/mergo.v0/doc.go new file mode 100644 index 000000000..6e9aa7baf --- /dev/null +++ b/vendor/gopkg.in/imdario/mergo.v0/doc.go @@ -0,0 +1,44 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mergo merges same-type structs and maps by setting default values in zero-value fields. + +Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Usage + +From my own work-in-progress project: + + type networkConfig struct { + Protocol string + Address string + ServerType string `json: "server_type"` + Port uint16 + } + + type FssnConfig struct { + Network networkConfig + } + + var fssnDefault = FssnConfig { + networkConfig { + "tcp", + "127.0.0.1", + "http", + 31560, + }, + } + + // Inside a function [...] + + if err := mergo.Merge(&config, fssnDefault); err != nil { + log.Fatal(err) + } + + // More code [...] + +*/ +package mergo diff --git a/vendor/gopkg.in/imdario/mergo.v0/map.go b/vendor/gopkg.in/imdario/mergo.v0/map.go new file mode 100644 index 000000000..3f5afa83a --- /dev/null +++ b/vendor/gopkg.in/imdario/mergo.v0/map.go @@ -0,0 +1,175 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. 
+// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + config.overwriteWithEmptyValue = true + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) +} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by +// non-empty src attribute values. 
+// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) +} + +func _map(dst, src interface{}, opts ...func(*Config)) error { + var ( + vDst, vSrc reflect.Value + err error + ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/gopkg.in/imdario/mergo.v0/merge.go b/vendor/gopkg.in/imdario/mergo.v0/merge.go new file mode 100644 index 000000000..f8de6c543 --- /dev/null +++ b/vendor/gopkg.in/imdario/mergo.v0/merge.go @@ -0,0 +1,255 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" +) + +func hasExportedField(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasExportedField(dst.Field(i)) + } else { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +type Config struct { + Overwrite bool + AppendSlice bool + Transformers Transformers + overwriteWithEmptyValue bool +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + overwriteWithEmptySrc := config.overwriteWithEmptyValue + config.overwriteWithEmptyValue = false + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
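+		// Recording the (address, type) pair lets deepMerge return early the
+		// next time it sees this value, so cyclic structures terminate.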
+ visited[h] = &visit{addr, typ, seen} + } + + if config.Transformers != nil && !isEmptyValue(dst) { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasExportedField(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } + dst.SetMapIndex(key, dstSlice) + } + } + if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { + continue + } + + if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + case reflect.Slice: + if !dst.CanSet() { + break + } + if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if src.IsNil() { + break + } + if src.Kind() != reflect.Interface { + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } 
else {
+				return ErrDifferentArgumentsTypes
+			}
+			break
+		}
+		if dst.IsNil() || overwrite {
+			if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+				dst.Set(src)
+			}
+		} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+			return
+		}
+	default:
+		if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
+			dst.Set(src)
+		}
+	}
+	return
+}
+
+// Merge will fill any empty value-type attributes on the dst struct using corresponding
+// src attributes if they themselves are not empty. dst and src must be valid structs of
+// the same type, and dst must be a pointer to a struct.
+// It won't merge unexported (private) fields and will recursively merge any exported field.
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
+	return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+	return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing the merging of some types to be customized.
+func WithTransformers(transformers Transformers) func(*Config) {
+	return func(config *Config) {
+		config.Transformers = transformers
+	}
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
+func WithOverride(config *Config) {
+	config.Overwrite = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
+func WithAppendSlice(config *Config) {
+	config.AppendSlice = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+
+	config := &Config{}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	if vDst.Type() != vSrc.Type() {
+		return ErrDifferentArgumentsTypes
+	}
+	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/gopkg.in/imdario/mergo.v0/mergo.go b/vendor/gopkg.in/imdario/mergo.v0/mergo.go
new file mode 100644
index 000000000..a82fea2fd
--- /dev/null
+++ b/vendor/gopkg.in/imdario/mergo.v0/mergo.go
@@ -0,0 +1,97 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"errors"
+	"reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+	ErrNilArguments                = errors.New("src and dst must not be nil")
+	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
+	ErrNotSupported                = errors.New("only structs and maps are supported")
+	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
+	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, we must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it re-encounters them.
+// Visited nodes are stored in a map indexed by 17 * a1 + a2.
+type visit struct {
+	ptr  uintptr
+	typ  reflect.Type
+	next *visit
+}
+
+// From src/pkg/encoding/json/encode.go.
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem()) + case reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} diff --git a/vendor/gopkg.in/op/go-logging.v1/.travis.yml b/vendor/gopkg.in/op/go-logging.v1/.travis.yml new file mode 100644 index 000000000..70e012b81 --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.0 + - 1.1 + - tip diff --git a/vendor/gopkg.in/op/go-logging.v1/CONTRIBUTORS b/vendor/gopkg.in/op/go-logging.v1/CONTRIBUTORS new file mode 100644 index 000000000..958416ef1 --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/CONTRIBUTORS @@ -0,0 +1,5 @@ +Alec Thomas +Guilhem Lettron +Ivan Daniluk +Nimi Wariboko Jr +Róbert Selvek diff --git a/vendor/gopkg.in/op/go-logging.v1/LICENSE b/vendor/gopkg.in/op/go-logging.v1/LICENSE new file mode 100644 index 000000000..f1f6cfcef --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013 Örjan Persson. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/gopkg.in/op/go-logging.v1/README.md b/vendor/gopkg.in/op/go-logging.v1/README.md
new file mode 100644
index 000000000..9999d90f5
--- /dev/null
+++ b/vendor/gopkg.in/op/go-logging.v1/README.md
@@ -0,0 +1,89 @@
+## Golang logging library
+
+[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/op/go-logging) [![build](https://img.shields.io/travis/op/go-logging.svg?style=flat)](https://travis-ci.org/op/go-logging)
+
+Package logging implements a logging infrastructure for Go. Its output format
+is customizable and supports different logging backends like syslog, file and
+memory. Multiple backends can be utilized with different log levels per backend
+and logger.
+
+## Example
+
+Let's have a look at an [example](examples/example.go) which demonstrates most
+of the features found in this library.
+
+[![Example Output](examples/example.png)](examples/example.go)
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/op/go-logging"
+)
+
+var log = logging.MustGetLogger("example")
+
+// Example format string. Everything except the message has a custom color
+// which is dependent on the log level. Many fields have a custom output
+// formatting too, e.g. the time returns the hour down to the millisecond.
+var format = logging.MustStringFormatter(
+	`%{color}%{time:15:04:05.000} %{shortfunc} ▶ %{level:.4s} %{id:03x}%{color:reset} %{message}`,
+)
+
+// Password is just an example type implementing the Redactor interface. Any
+// time this is logged, the Redacted() function will be called.
+type Password string
+
+func (p Password) Redacted() interface{} {
+	return logging.Redact(string(p))
+}
+
+func main() {
+	// For demo purposes, create two backends for os.Stderr.
+	backend1 := logging.NewLogBackend(os.Stderr, "", 0)
+	backend2 := logging.NewLogBackend(os.Stderr, "", 0)
+
+	// For messages written to backend2 we want to add some additional
+	// information to the output, including the log level used and the name of
+	// the function.
+	backend2Formatter := logging.NewBackendFormatter(backend2, format)
+
+	// Only errors and more severe messages should be sent to backend1.
+	backend1Leveled := logging.AddModuleLevel(backend1)
+	backend1Leveled.SetLevel(logging.ERROR, "")
+
+	// Set the backends to be used.
+	logging.SetBackend(backend1Leveled, backend2Formatter)
+
+	log.Debugf("debug %s", Password("secret"))
+	log.Info("info")
+	log.Notice("notice")
+	log.Warning("warning")
+	log.Error("err")
+	log.Critical("crit")
+}
+```
+
+## Installing
+
+### Using *go get*

+    $ go get github.com/op/go-logging
+
+After this command *go-logging* is ready to use. Its source will be in:
+
+    $GOPATH/src/github.com/op/go-logging
+
+You can use `go get -u` to update the package.
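+
+## Adjusting log levels at runtime
+
+A short sketch of applying a level parsed from a string without recompiling.
+The module name "example" matches the logger created above; the environment
+variable is hypothetical:
+
+```go
+level, err := logging.LogLevel(os.Getenv("LOG_LEVEL")) // e.g. "WARNING"
+if err != nil {
+	level = logging.INFO // fall back when unset or invalid
+}
+logging.SetLevel(level, "example")
+```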
+ +## Documentation + +For docs, see http://godoc.org/github.com/op/go-logging or run: + + $ godoc github.com/op/go-logging + +## Additional resources + +* [wslog](https://godoc.org/github.com/cryptix/go/logging/wslog) -- exposes log messages through a WebSocket. diff --git a/vendor/gopkg.in/op/go-logging.v1/backend.go b/vendor/gopkg.in/op/go-logging.v1/backend.go new file mode 100644 index 000000000..74d920108 --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/backend.go @@ -0,0 +1,39 @@ +// Copyright 2013, Örjan Persson. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logging + +// defaultBackend is the backend used for all logging calls. +var defaultBackend LeveledBackend + +// Backend is the interface which a log backend need to implement to be able to +// be used as a logging backend. +type Backend interface { + Log(Level, int, *Record) error +} + +// SetBackend replaces the backend currently set with the given new logging +// backend. +func SetBackend(backends ...Backend) LeveledBackend { + var backend Backend + if len(backends) == 1 { + backend = backends[0] + } else { + backend = MultiLogger(backends...) + } + + defaultBackend = AddModuleLevel(backend) + return defaultBackend +} + +// SetLevel sets the logging level for the specified module. The module +// corresponds to the string specified in GetLogger. +func SetLevel(level Level, module string) { + defaultBackend.SetLevel(level, module) +} + +// GetLevel returns the logging level for the specified module. +func GetLevel(module string) Level { + return defaultBackend.GetLevel(module) +} diff --git a/vendor/gopkg.in/op/go-logging.v1/format.go b/vendor/gopkg.in/op/go-logging.v1/format.go new file mode 100644 index 000000000..2d6a4044f --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/format.go @@ -0,0 +1,400 @@ +// Copyright 2013, Örjan Persson. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logging + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "path" + "path/filepath" + "regexp" + "runtime" + "strings" + "sync" + "time" +) + +// TODO see Formatter interface in fmt/print.go +// TODO try text/template, maybe it have enough performance +// TODO other template systems? +// TODO make it possible to specify formats per backend? +type fmtVerb int + +const ( + fmtVerbTime fmtVerb = iota + fmtVerbLevel + fmtVerbID + fmtVerbPid + fmtVerbProgram + fmtVerbModule + fmtVerbMessage + fmtVerbLongfile + fmtVerbShortfile + fmtVerbLongpkg + fmtVerbShortpkg + fmtVerbLongfunc + fmtVerbShortfunc + fmtVerbCallpath + fmtVerbLevelColor + + // Keep last, there are no match for these below. + fmtVerbUnknown + fmtVerbStatic +) + +var fmtVerbs = []string{ + "time", + "level", + "id", + "pid", + "program", + "module", + "message", + "longfile", + "shortfile", + "longpkg", + "shortpkg", + "longfunc", + "shortfunc", + "callpath", + "color", +} + +const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00" + +var defaultVerbsLayout = []string{ + rfc3339Milli, + "s", + "d", + "d", + "s", + "s", + "s", + "s", + "s", + "s", + "s", + "s", + "s", + "s", + "", +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) +) + +func getFmtVerbByName(name string) fmtVerb { + for i, verb := range fmtVerbs { + if name == verb { + return fmtVerb(i) + } + } + return fmtVerbUnknown +} + +// Formatter is the required interface for a custom log record formatter. 
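+//
+// A sketch of a custom implementation (hypothetical, not part of this
+// package):
+//
+//	type prefixFormatter struct{ prefix string }
+//
+//	func (f prefixFormatter) Format(calldepth int, r *Record, w io.Writer) error {
+//		_, err := fmt.Fprintf(w, "%s%s %s", f.prefix, r.Level, r.Message())
+//		return err
+//	}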
+type Formatter interface { + Format(calldepth int, r *Record, w io.Writer) error +} + +// formatter is used by all backends unless otherwise overriden. +var formatter struct { + sync.RWMutex + def Formatter +} + +func getFormatter() Formatter { + formatter.RLock() + defer formatter.RUnlock() + return formatter.def +} + +var ( + // DefaultFormatter is the default formatter used and is only the message. + DefaultFormatter = MustStringFormatter("%{message}") + + // GlogFormatter mimics the glog format + GlogFormatter = MustStringFormatter("%{level:.1s}%{time:0102 15:04:05.999999} %{pid} %{shortfile}] %{message}") +) + +// SetFormatter sets the default formatter for all new backends. A backend will +// fetch this value once it is needed to format a record. Note that backends +// will cache the formatter after the first point. For now, make sure to set +// the formatter before logging. +func SetFormatter(f Formatter) { + formatter.Lock() + defer formatter.Unlock() + formatter.def = f +} + +var formatRe = regexp.MustCompile(`%{([a-z]+)(?::(.*?[^\\]))?}`) + +type part struct { + verb fmtVerb + layout string +} + +// stringFormatter contains a list of parts which explains how to build the +// formatted string passed on to the logging backend. +type stringFormatter struct { + parts []part +} + +// NewStringFormatter returns a new Formatter which outputs the log record as a +// string based on the 'verbs' specified in the format string. +// +// The verbs: +// +// General: +// %{id} Sequence number for log message (uint64). +// %{pid} Process id (int) +// %{time} Time when log occurred (time.Time) +// %{level} Log level (Level) +// %{module} Module (string) +// %{program} Basename of os.Args[0] (string) +// %{message} Message (string) +// %{longfile} Full file name and line number: /a/b/c/d.go:23 +// %{shortfile} Final file name element and line number: d.go:23 +// %{callpath} Callpath like main.a.b.c...c "..." meaning recursive call +// %{color} ANSI color based on log level +// +// For normal types, the output can be customized by using the 'verbs' defined +// in the fmt package, eg. '%{id:04d}' to make the id output be '%04d' as the +// format string. +// +// For time.Time, use the same layout as time.Format to change the time format +// when output, eg "2006-01-02T15:04:05.999Z-07:00". +// +// For the 'color' verb, the output can be adjusted to either use bold colors, +// i.e., '%{color:bold}' or to reset the ANSI attributes, i.e., +// '%{color:reset}' Note that if you use the color verb explicitly, be sure to +// reset it or else the color state will persist past your log message. e.g., +// "%{color:bold}%{time:15:04:05} %{level:-8s}%{color:reset} %{message}" will +// just colorize the time and level, leaving the message uncolored. +// +// Colors on Windows is unfortunately not supported right now and is currently +// a no-op. +// +// There's also a couple of experimental 'verbs'. These are exposed to get +// feedback and needs a bit of tinkering. Hence, they might change in the +// future. +// +// Experimental: +// %{longpkg} Full package path, eg. github.com/go-logging +// %{shortpkg} Base package path, eg. go-logging +// %{longfunc} Full function name, eg. littleEndian.PutUint32 +// %{shortfunc} Base function name, eg. PutUint32 +// %{callpath} Call function path, eg. 
main.a.b.c +func NewStringFormatter(format string) (Formatter, error) { + var fmter = &stringFormatter{} + + // Find the boundaries of all %{vars} + matches := formatRe.FindAllStringSubmatchIndex(format, -1) + if matches == nil { + return nil, errors.New("logger: invalid log format: " + format) + } + + // Collect all variables and static text for the format + prev := 0 + for _, m := range matches { + start, end := m[0], m[1] + if start > prev { + fmter.add(fmtVerbStatic, format[prev:start]) + } + + name := format[m[2]:m[3]] + verb := getFmtVerbByName(name) + if verb == fmtVerbUnknown { + return nil, errors.New("logger: unknown variable: " + name) + } + + // Handle layout customizations or use the default. If this is not for the + // time or color formatting, we need to prefix with %. + layout := defaultVerbsLayout[verb] + if m[4] != -1 { + layout = format[m[4]:m[5]] + } + if verb != fmtVerbTime && verb != fmtVerbLevelColor { + layout = "%" + layout + } + + fmter.add(verb, layout) + prev = end + } + end := format[prev:] + if end != "" { + fmter.add(fmtVerbStatic, end) + } + + // Make a test run to make sure we can format it correctly. + t, err := time.Parse(time.RFC3339, "2010-02-04T21:00:57-08:00") + if err != nil { + panic(err) + } + r := &Record{ + Id: 12345, + Time: t, + Module: "logger", + Args: []interface{}{"go"}, + fmt: "hello %s", + } + if err := fmter.Format(0, r, &bytes.Buffer{}); err != nil { + return nil, err + } + + return fmter, nil +} + +// MustStringFormatter is equivalent to NewStringFormatter with a call to panic +// on error. +func MustStringFormatter(format string) Formatter { + f, err := NewStringFormatter(format) + if err != nil { + panic("Failed to initialized string formatter: " + err.Error()) + } + return f +} + +func (f *stringFormatter) add(verb fmtVerb, layout string) { + f.parts = append(f.parts, part{verb, layout}) +} + +func (f *stringFormatter) Format(calldepth int, r *Record, output io.Writer) error { + for _, part := range f.parts { + if part.verb == fmtVerbStatic { + output.Write([]byte(part.layout)) + } else if part.verb == fmtVerbTime { + output.Write([]byte(r.Time.Format(part.layout))) + } else if part.verb == fmtVerbLevelColor { + doFmtVerbLevelColor(part.layout, r.Level, output) + } else { + var v interface{} + switch part.verb { + case fmtVerbLevel: + v = r.Level + break + case fmtVerbID: + v = r.Id + break + case fmtVerbPid: + v = pid + break + case fmtVerbProgram: + v = program + break + case fmtVerbModule: + v = r.Module + break + case fmtVerbMessage: + v = r.Message() + break + case fmtVerbLongfile, fmtVerbShortfile: + _, file, line, ok := runtime.Caller(calldepth + 1) + if !ok { + file = "???" + line = 0 + } else if part.verb == fmtVerbShortfile { + file = filepath.Base(file) + } + v = fmt.Sprintf("%s:%d", file, line) + case fmtVerbLongfunc, fmtVerbShortfunc, + fmtVerbLongpkg, fmtVerbShortpkg: + // TODO cache pc + v = "???" + if pc, _, _, ok := runtime.Caller(calldepth + 1); ok { + if f := runtime.FuncForPC(pc); f != nil { + v = formatFuncName(part.verb, f.Name()) + } + } + case fmtVerbCallpath: + v = formatCallpath(calldepth + 1) + default: + panic("unhandled format part") + } + fmt.Fprintf(output, part.layout, v) + } + } + return nil +} + +// formatFuncName tries to extract certain part of the runtime formatted +// function name to some pre-defined variation. +// +// This function is known to not work properly if the package path or name +// contains a dot. 
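+//
+// For example, given the runtime-formatted name
+// "github.com/op/go-logging.(*Logger).log", %{longpkg} yields
+// "github.com/op/go-logging", %{shortpkg} yields "go-logging", %{longfunc}
+// yields "(*Logger).log" and %{shortfunc} yields "log".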
+func formatFuncName(v fmtVerb, f string) string {
+	i := strings.LastIndex(f, "/")
+	j := strings.Index(f[i+1:], ".")
+	if j < 1 {
+		return "???"
+	}
+	pkg, fun := f[:i+j+1], f[i+j+2:]
+	switch v {
+	case fmtVerbLongpkg:
+		return pkg
+	case fmtVerbShortpkg:
+		return path.Base(pkg)
+	case fmtVerbLongfunc:
+		return fun
+	case fmtVerbShortfunc:
+		i = strings.LastIndex(fun, ".")
+		return fun[i+1:]
+	}
+	panic("unexpected func formatter")
+}
+
+func formatCallpath(calldepth int) string {
+	v := ""
+	callers := make([]uintptr, 64)
+	n := runtime.Callers(calldepth+2, callers)
+	oldPc := callers[n-1]
+
+	recursiveCall := false
+	for i := n - 3; i >= 0; i-- {
+		pc := callers[i]
+		if oldPc == pc {
+			recursiveCall = true
+			continue
+		}
+		oldPc = pc
+		if recursiveCall {
+			recursiveCall = false
+			v += ".."
+		}
+		if i < n-3 {
+			v += "."
+		}
+		if f := runtime.FuncForPC(pc); f != nil {
+			v += formatFuncName(fmtVerbShortfunc, f.Name())
+		}
+	}
+	return v
+}
+
+// backendFormatter combines a backend with a specific formatter making it
+// possible to have different log formats for different backends.
+type backendFormatter struct {
+	b Backend
+	f Formatter
+}
+
+// NewBackendFormatter creates a new backend which makes all records that
+// pass through it be formatted by the specific formatter.
+func NewBackendFormatter(b Backend, f Formatter) Backend {
+	return &backendFormatter{b, f}
+}
+
+// Log implements the Log function required by the Backend interface.
+func (bf *backendFormatter) Log(level Level, calldepth int, r *Record) error {
+	// Make a shallow copy of the record and replace any formatter
+	r2 := *r
+	r2.formatter = bf.f
+	return bf.b.Log(level, calldepth+1, &r2)
+}
diff --git a/vendor/gopkg.in/op/go-logging.v1/level.go b/vendor/gopkg.in/op/go-logging.v1/level.go
new file mode 100644
index 000000000..98dd19187
--- /dev/null
+++ b/vendor/gopkg.in/op/go-logging.v1/level.go
@@ -0,0 +1,128 @@
+// Copyright 2013, Örjan Persson. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logging
+
+import (
+	"errors"
+	"strings"
+	"sync"
+)
+
+// ErrInvalidLogLevel is used when an invalid log level has been used.
+var ErrInvalidLogLevel = errors.New("logger: invalid log level")
+
+// Level defines all available log levels for log messages.
+type Level int
+
+// Log levels.
+const (
+	CRITICAL Level = iota
+	ERROR
+	WARNING
+	NOTICE
+	INFO
+	DEBUG
+)
+
+var levelNames = []string{
+	"CRITICAL",
+	"ERROR",
+	"WARNING",
+	"NOTICE",
+	"INFO",
+	"DEBUG",
+}
+
+// String returns the string representation of a logging level.
+func (p Level) String() string {
+	return levelNames[p]
+}
+
+// LogLevel returns the log level from a string representation.
+func LogLevel(level string) (Level, error) {
+	for i, name := range levelNames {
+		if strings.EqualFold(name, level) {
+			return Level(i), nil
+		}
+	}
+	return ERROR, ErrInvalidLogLevel
+}
+
+// Leveled interface is the interface required to be able to add leveled
+// logging.
+type Leveled interface {
+	GetLevel(string) Level
+	SetLevel(Level, string)
+	IsEnabledFor(Level, string) bool
+}
+
+// LeveledBackend is a log backend with additional knobs for setting levels on
+// individual modules to different levels.
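+//
+// A sketch of typical use (the module name is hypothetical):
+//
+//	leveled := AddModuleLevel(NewLogBackend(os.Stderr, "", 0))
+//	leveled.SetLevel(ERROR, "database") // only ERROR and above for "database"
+//	SetBackend(leveled)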
+type LeveledBackend interface { + Backend + Leveled +} + +type moduleLeveled struct { + levels map[string]Level + backend Backend + formatter Formatter + once sync.Once +} + +// AddModuleLevel wraps a log backend with knobs to have different log levels +// for different modules. +func AddModuleLevel(backend Backend) LeveledBackend { + var leveled LeveledBackend + var ok bool + if leveled, ok = backend.(LeveledBackend); !ok { + leveled = &moduleLeveled{ + levels: make(map[string]Level), + backend: backend, + } + } + return leveled +} + +// GetLevel returns the log level for the given module. +func (l *moduleLeveled) GetLevel(module string) Level { + level, exists := l.levels[module] + if exists == false { + level, exists = l.levels[""] + // no configuration exists, default to debug + if exists == false { + level = DEBUG + } + } + return level +} + +// SetLevel sets the log level for the given module. +func (l *moduleLeveled) SetLevel(level Level, module string) { + l.levels[module] = level +} + +// IsEnabledFor will return true if logging is enabled for the given module. +func (l *moduleLeveled) IsEnabledFor(level Level, module string) bool { + return level <= l.GetLevel(module) +} + +func (l *moduleLeveled) Log(level Level, calldepth int, rec *Record) (err error) { + if l.IsEnabledFor(level, rec.Module) { + // TODO get rid of traces of formatter here. BackendFormatter should be used. + rec.formatter = l.getFormatterAndCacheCurrent() + err = l.backend.Log(level, calldepth+1, rec) + } + return +} + +func (l *moduleLeveled) getFormatterAndCacheCurrent() Formatter { + l.once.Do(func() { + if l.formatter == nil { + l.formatter = getFormatter() + } + }) + return l.formatter +} diff --git a/vendor/gopkg.in/op/go-logging.v1/log_nix.go b/vendor/gopkg.in/op/go-logging.v1/log_nix.go new file mode 100644 index 000000000..4ff2ab1ab --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/log_nix.go @@ -0,0 +1,109 @@ +// +build !windows + +// Copyright 2013, Örjan Persson. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logging + +import ( + "bytes" + "fmt" + "io" + "log" +) + +type color int + +const ( + ColorBlack = iota + 30 + ColorRed + ColorGreen + ColorYellow + ColorBlue + ColorMagenta + ColorCyan + ColorWhite +) + +var ( + colors = []string{ + CRITICAL: ColorSeq(ColorMagenta), + ERROR: ColorSeq(ColorRed), + WARNING: ColorSeq(ColorYellow), + NOTICE: ColorSeq(ColorGreen), + DEBUG: ColorSeq(ColorCyan), + } + boldcolors = []string{ + CRITICAL: ColorSeqBold(ColorMagenta), + ERROR: ColorSeqBold(ColorRed), + WARNING: ColorSeqBold(ColorYellow), + NOTICE: ColorSeqBold(ColorGreen), + DEBUG: ColorSeqBold(ColorCyan), + } +) + +// LogBackend utilizes the standard log module. +type LogBackend struct { + Logger *log.Logger + Color bool + ColorConfig []string +} + +// NewLogBackend creates a new LogBackend. +func NewLogBackend(out io.Writer, prefix string, flag int) *LogBackend { + return &LogBackend{Logger: log.New(out, prefix, flag)} +} + +// Log implements the Backend interface. +func (b *LogBackend) Log(level Level, calldepth int, rec *Record) error { + if b.Color { + col := colors[level] + if len(b.ColorConfig) > int(level) && b.ColorConfig[level] != "" { + col = b.ColorConfig[level] + } + + buf := &bytes.Buffer{} + buf.Write([]byte(col)) + buf.Write([]byte(rec.Formatted(calldepth + 1))) + buf.Write([]byte("\033[0m")) + // For some reason, the Go logger arbitrarily decided "2" was the correct + // call depth... 
+ return b.Logger.Output(calldepth+2, buf.String()) + } + + return b.Logger.Output(calldepth+2, rec.Formatted(calldepth+1)) +} + +// ConvertColors takes a list of ints representing colors for log levels and +// converts them into strings for ANSI color formatting +func ConvertColors(colors []int, bold bool) []string { + converted := []string{} + for _, i := range colors { + if bold { + converted = append(converted, ColorSeqBold(color(i))) + } else { + converted = append(converted, ColorSeq(color(i))) + } + } + + return converted +} + +func ColorSeq(color color) string { + return fmt.Sprintf("\033[%dm", int(color)) +} + +func ColorSeqBold(color color) string { + return fmt.Sprintf("\033[%d;1m", int(color)) +} + +func doFmtVerbLevelColor(layout string, level Level, output io.Writer) { + if layout == "bold" { + output.Write([]byte(boldcolors[level])) + } else if layout == "reset" { + output.Write([]byte("\033[0m")) + } else { + output.Write([]byte(colors[level])) + } +} diff --git a/vendor/gopkg.in/op/go-logging.v1/log_windows.go b/vendor/gopkg.in/op/go-logging.v1/log_windows.go new file mode 100644 index 000000000..b8dc92c67 --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/log_windows.go @@ -0,0 +1,107 @@ +// +build windows +// Copyright 2013, Örjan Persson. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logging + +import ( + "bytes" + "io" + "log" + "syscall" +) + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") +) + +// Character attributes +// Note: +// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). +// Clearing all foreground or background colors results in black; setting all creates white. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. +const ( + fgBlack = 0x0000 + fgBlue = 0x0001 + fgGreen = 0x0002 + fgCyan = 0x0003 + fgRed = 0x0004 + fgMagenta = 0x0005 + fgYellow = 0x0006 + fgWhite = 0x0007 + fgIntensity = 0x0008 + fgMask = 0x000F +) + +var ( + colors = []uint16{ + INFO: fgWhite, + CRITICAL: fgMagenta, + ERROR: fgRed, + WARNING: fgYellow, + NOTICE: fgGreen, + DEBUG: fgCyan, + } + boldcolors = []uint16{ + INFO: fgWhite | fgIntensity, + CRITICAL: fgMagenta | fgIntensity, + ERROR: fgRed | fgIntensity, + WARNING: fgYellow | fgIntensity, + NOTICE: fgGreen | fgIntensity, + DEBUG: fgCyan | fgIntensity, + } +) + +type file interface { + Fd() uintptr +} + +// LogBackend utilizes the standard log module. +type LogBackend struct { + Logger *log.Logger + Color bool + + // f is set to a non-nil value if the underlying writer which logs writes to + // implements the file interface. This makes us able to colorise the output. + f file +} + +// NewLogBackend creates a new LogBackend. +func NewLogBackend(out io.Writer, prefix string, flag int) *LogBackend { + b := &LogBackend{Logger: log.New(out, prefix, flag)} + + // Unfortunately, the API used only takes an io.Writer where the Windows API + // need the actual fd to change colors. 
+ if f, ok := out.(file); ok { + b.f = f + } + + return b +} + +func (b *LogBackend) Log(level Level, calldepth int, rec *Record) error { + if b.Color && b.f != nil { + buf := &bytes.Buffer{} + setConsoleTextAttribute(b.f, colors[level]) + buf.Write([]byte(rec.Formatted(calldepth + 1))) + err := b.Logger.Output(calldepth+2, buf.String()) + setConsoleTextAttribute(b.f, fgWhite) + return err + } + return b.Logger.Output(calldepth+2, rec.Formatted(calldepth+1)) +} + +// setConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func setConsoleTextAttribute(f file, attribute uint16) bool { + ok, _, _ := setConsoleTextAttributeProc.Call(f.Fd(), uintptr(attribute), 0) + return ok != 0 +} + +func doFmtVerbLevelColor(layout string, level Level, output io.Writer) { + // TODO not supported on Windows since the io.Writer here is actually a + // bytes.Buffer. +} diff --git a/vendor/gopkg.in/op/go-logging.v1/logger.go b/vendor/gopkg.in/op/go-logging.v1/logger.go new file mode 100644 index 000000000..8e4ebab96 --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/logger.go @@ -0,0 +1,249 @@ +// Copyright 2013, Örjan Persson. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package logging implements a logging infrastructure for Go. It supports +// different logging backends like syslog, file and memory. Multiple backends +// can be utilized with different log levels per backend and logger. +package logging + +import ( + "bytes" + "fmt" + "log" + "os" + "strings" + "sync/atomic" + "time" +) + +// Redactor is an interface for types that may contain sensitive information +// (like passwords), which shouldn't be printed to the log. The idea was found +// in relog as part of the vitness project. +type Redactor interface { + Redacted() interface{} +} + +// Redact returns a string of * having the same length as s. +func Redact(s string) string { + return strings.Repeat("*", len(s)) +} + +var ( + // Sequence number is incremented and utilized for all log records created. + sequenceNo uint64 + + // timeNow is a customizable for testing purposes. + timeNow = time.Now +) + +// Record represents a log record and contains the timestamp when the record +// was created, an increasing id, filename and line and finally the actual +// formatted log line. +type Record struct { + Id uint64 + Time time.Time + Module string + Level Level + Args []interface{} + + // message is kept as a pointer to have shallow copies update this once + // needed. + message *string + fmt string + formatter Formatter + formatted string +} + +// Formatted returns the formatted log record string. +func (r *Record) Formatted(calldepth int) string { + if r.formatted == "" { + var buf bytes.Buffer + r.formatter.Format(calldepth+1, r, &buf) + r.formatted = buf.String() + } + return r.formatted +} + +// Message returns the log record message. +func (r *Record) Message() string { + if r.message == nil { + // Redact the arguments that implements the Redactor interface + for i, arg := range r.Args { + if redactor, ok := arg.(Redactor); ok == true { + r.Args[i] = redactor.Redacted() + } + } + msg := fmt.Sprintf(r.fmt, r.Args...) 
+ r.message = &msg + } + return *r.message +} + +// Logger is the actual logger which creates log records based on the functions +// called and passes them to the underlying logging backend. +type Logger struct { + Module string + backend LeveledBackend + haveBackend bool + + // ExtraCallDepth can be used to add additional call depth when getting the + // calling function. This is normally used when wrapping a logger. + ExtraCalldepth int +} + +// SetBackend overrides any previously defined backend for this logger. +func (l *Logger) SetBackend(backend LeveledBackend) { + l.backend = backend + l.haveBackend = true +} + +// TODO call NewLogger and remove MustGetLogger? + +// GetLogger creates and returns a Logger object based on the module name. +func GetLogger(module string) (*Logger, error) { + return &Logger{Module: module}, nil +} + +// MustGetLogger is like GetLogger but panics if the logger can't be created. +// It simplifies safe initialization of a global logger for eg. a package. +func MustGetLogger(module string) *Logger { + logger, err := GetLogger(module) + if err != nil { + panic("logger: " + module + ": " + err.Error()) + } + return logger +} + +// Reset restores the internal state of the logging library. +func Reset() { + // TODO make a global Init() method to be less magic? or make it such that + // if there's no backends at all configured, we could use some tricks to + // automatically setup backends based if we have a TTY or not. + sequenceNo = 0 + b := SetBackend(NewLogBackend(os.Stderr, "", log.LstdFlags)) + b.SetLevel(DEBUG, "") + SetFormatter(DefaultFormatter) + timeNow = time.Now +} + +// IsEnabledFor returns true if the logger is enabled for the given level. +func (l *Logger) IsEnabledFor(level Level) bool { + return defaultBackend.IsEnabledFor(level, l.Module) +} + +func (l *Logger) log(lvl Level, format string, args ...interface{}) { + if !l.IsEnabledFor(lvl) { + return + } + + // Create the logging record and pass it in to the backend + record := &Record{ + Id: atomic.AddUint64(&sequenceNo, 1), + Time: timeNow(), + Module: l.Module, + Level: lvl, + fmt: format, + Args: args, + } + + // TODO use channels to fan out the records to all backends? + // TODO in case of errors, do something (tricky) + + // calldepth=2 brings the stack up to the caller of the level + // methods, Info(), Fatal(), etc. + // ExtraCallDepth allows this to be extended further up the stack in case we + // are wrapping these methods, eg. to expose them package level + if l.haveBackend { + l.backend.Log(lvl, 2+l.ExtraCalldepth, record) + return + } + + defaultBackend.Log(lvl, 2+l.ExtraCalldepth, record) +} + +// Fatal is equivalent to l.Critical(fmt.Sprint()) followed by a call to os.Exit(1). +func (l *Logger) Fatal(args ...interface{}) { + s := fmt.Sprint(args...) + l.log(CRITICAL, "%s", s) + os.Exit(1) +} + +// Fatalf is equivalent to l.Critical followed by a call to os.Exit(1). +func (l *Logger) Fatalf(format string, args ...interface{}) { + l.log(CRITICAL, format, args...) + os.Exit(1) +} + +// Panic is equivalent to l.Critical(fmt.Sprint()) followed by a call to panic(). +func (l *Logger) Panic(args ...interface{}) { + s := fmt.Sprint(args...) + l.log(CRITICAL, "%s", s) + panic(s) +} + +// Panicf is equivalent to l.Critical followed by a call to panic(). +func (l *Logger) Panicf(format string, args ...interface{}) { + s := fmt.Sprintf(format, args...) + l.log(CRITICAL, "%s", s) + panic(s) +} + +// Critical logs a message using CRITICAL as log level. 
+func (l *Logger) Critical(format string, args ...interface{}) {
+	l.log(CRITICAL, format, args...)
+}
+
+// Error logs a message using ERROR as log level.
+func (l *Logger) Error(format string, args ...interface{}) {
+	l.log(ERROR, format, args...)
+}
+
+// Errorf logs a message using ERROR as log level.
+func (l *Logger) Errorf(format string, args ...interface{}) {
+	l.log(ERROR, format, args...)
+}
+
+// Warning logs a message using WARNING as log level.
+func (l *Logger) Warning(format string, args ...interface{}) {
+	l.log(WARNING, format, args...)
+}
+
+// Warningf logs a message using WARNING as log level.
+func (l *Logger) Warningf(format string, args ...interface{}) {
+	l.log(WARNING, format, args...)
+}
+
+// Notice logs a message using NOTICE as log level.
+func (l *Logger) Notice(format string, args ...interface{}) {
+	l.log(NOTICE, format, args...)
+}
+
+// Noticef logs a message using NOTICE as log level.
+func (l *Logger) Noticef(format string, args ...interface{}) {
+	l.log(NOTICE, format, args...)
+}
+
+// Info logs a message using INFO as log level.
+func (l *Logger) Info(format string, args ...interface{}) {
+	l.log(INFO, format, args...)
+}
+
+// Infof logs a message using INFO as log level.
+func (l *Logger) Infof(format string, args ...interface{}) {
+	l.log(INFO, format, args...)
+}
+
+// Debug logs a message using DEBUG as log level.
+func (l *Logger) Debug(format string, args ...interface{}) {
+	l.log(DEBUG, format, args...)
+}
+
+// Debugf logs a message using DEBUG as log level.
+func (l *Logger) Debugf(format string, args ...interface{}) {
+	l.log(DEBUG, format, args...)
+}
+
+func init() {
+	Reset()
+}
diff --git a/vendor/gopkg.in/op/go-logging.v1/memory.go b/vendor/gopkg.in/op/go-logging.v1/memory.go
new file mode 100644
index 000000000..8d5152c0b
--- /dev/null
+++ b/vendor/gopkg.in/op/go-logging.v1/memory.go
@@ -0,0 +1,237 @@
+// Copyright 2013, Örjan Persson. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logging
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+	"unsafe"
+)
+
+// TODO pick one of the memory backends and stick with it or share interface.
+
+// InitForTesting is a convenience method when using logging in a test. Once
+// called, the time will be frozen to January 1, 1970 UTC.
+func InitForTesting(level Level) *MemoryBackend {
+	Reset()
+
+	memoryBackend := NewMemoryBackend(10240)
+
+	leveledBackend := AddModuleLevel(memoryBackend)
+	leveledBackend.SetLevel(level, "")
+	SetBackend(leveledBackend)
+
+	timeNow = func() time.Time {
+		return time.Unix(0, 0).UTC()
+	}
+	return memoryBackend
+}
+
+// node is a record node pointing to an optional next node.
+type node struct {
+	next   *node
+	Record *Record
+}
+
+// Next returns the next record node. If there's no node available, it will
+// return nil.
+func (n *node) Next() *node {
+	return n.next
+}
+
+// MemoryBackend is a simple memory-based logging backend that will not produce
+// any output but merely keep records, up to the given size, in memory.
+type MemoryBackend struct {
+	size       int32
+	maxSize    int32
+	head, tail unsafe.Pointer
+}
+
+// NewMemoryBackend creates a simple in-memory logging backend.
+func NewMemoryBackend(size int) *MemoryBackend {
+	return &MemoryBackend{maxSize: int32(size)}
+}
+
+// Log implements the Log method required by Backend.
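+// It appends the record to a lock-free singly linked list using
+// compare-and-swap, then, if the configured maximum size is exceeded,
+// advances the head pointer to drop the oldest record.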
+func (b *MemoryBackend) Log(level Level, calldepth int, rec *Record) error {
+	var size int32
+
+	n := &node{Record: rec}
+	np := unsafe.Pointer(n)
+
+	// Add the record to the tail. If there are no records available, tail and
+	// head will both be nil. When we successfully set the tail and the previous
+	// value was nil, it's safe to set the head to the current value too.
+	for {
+		tailp := b.tail
+		swapped := atomic.CompareAndSwapPointer(
+			&b.tail,
+			tailp,
+			np,
+		)
+		if swapped == true {
+			if tailp == nil {
+				b.head = np
+			} else {
+				(*node)(tailp).next = n
+			}
+			size = atomic.AddInt32(&b.size, 1)
+			break
+		}
+	}
+
+	// Since one record was added, we might have overflowed the list. Remove
+	// a record if that is the case. The size will fluctuate a bit, but is
+	// eventually consistent.
+	if b.maxSize > 0 && size > b.maxSize {
+		for {
+			headp := b.head
+			head := (*node)(b.head)
+			if head.next == nil {
+				break
+			}
+			swapped := atomic.CompareAndSwapPointer(
+				&b.head,
+				headp,
+				unsafe.Pointer(head.next),
+			)
+			if swapped == true {
+				atomic.AddInt32(&b.size, -1)
+				break
+			}
+		}
+	}
+	return nil
+}
+
+// Head returns the oldest record node kept in memory. It can be used to
+// iterate over records, one by one, up to the last record.
+//
+// Note: new records can get added while iterating. Hence the number of records
+// iterated over might be larger than the maximum size.
+func (b *MemoryBackend) Head() *node {
+	return (*node)(b.head)
+}
+
+type event int
+
+const (
+	eventFlush event = iota
+	eventStop
+)
+
+// ChannelMemoryBackend is very similar to the MemoryBackend, except that it
+// internally utilizes a channel.
+type ChannelMemoryBackend struct {
+	maxSize    int
+	size       int
+	incoming   chan *Record
+	events     chan event
+	mu         sync.Mutex
+	running    bool
+	flushWg    sync.WaitGroup
+	stopWg     sync.WaitGroup
+	head, tail *node
+}
+
+// NewChannelMemoryBackend creates a simple in-memory logging backend which
+// utilizes a go channel for communication.
+//
+// Start will automatically be called by this function.
+func NewChannelMemoryBackend(size int) *ChannelMemoryBackend {
+	backend := &ChannelMemoryBackend{
+		maxSize:  size,
+		incoming: make(chan *Record, 1024),
+		events:   make(chan event),
+	}
+	backend.Start()
+	return backend
+}
+
+// Start launches the internal goroutine which starts processing data from the
+// input channel.
+func (b *ChannelMemoryBackend) Start() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	// Launch the goroutine unless it's already running.
+	if b.running != true {
+		b.running = true
+		b.stopWg.Add(1)
+		go b.process()
+	}
+}
+
+func (b *ChannelMemoryBackend) process() {
+	defer b.stopWg.Done()
+	for {
+		select {
+		case rec := <-b.incoming:
+			b.insertRecord(rec)
+		case e := <-b.events:
+			switch e {
+			case eventStop:
+				return
+			case eventFlush:
+				for len(b.incoming) > 0 {
+					b.insertRecord(<-b.incoming)
+				}
+				b.flushWg.Done()
+			}
+		}
+	}
+}
+
+func (b *ChannelMemoryBackend) insertRecord(rec *Record) {
+	prev := b.tail
+	b.tail = &node{Record: rec}
+	if prev == nil {
+		b.head = b.tail
+	} else {
+		prev.next = b.tail
+	}
+
+	if b.maxSize > 0 && b.size >= b.maxSize {
+		b.head = b.head.next
+	} else {
+		b.size++
+	}
+}
+
+// Flush waits until all records in the buffered channel have been processed.
+func (b *ChannelMemoryBackend) Flush() {
+	b.flushWg.Add(1)
+	b.events <- eventFlush
+	b.flushWg.Wait()
+}
+
+// Stop signals the internal goroutine to exit and waits until it has.
+func (b *ChannelMemoryBackend) Stop() { + b.mu.Lock() + if b.running == true { + b.running = false + b.events <- eventStop + } + b.mu.Unlock() + b.stopWg.Wait() +} + +// Log implements the Log method required by Backend. +func (b *ChannelMemoryBackend) Log(level Level, calldepth int, rec *Record) error { + b.incoming <- rec + return nil +} + +// Head returns the oldest record node kept in memory. It can be used to +// iterate over records, one by one, up to the last record. +// +// Note: new records can get added while iterating. Hence the number of records +// iterated over might be larger than the maximum size. +func (b *ChannelMemoryBackend) Head() *node { + return b.head +} diff --git a/vendor/gopkg.in/op/go-logging.v1/multi.go b/vendor/gopkg.in/op/go-logging.v1/multi.go new file mode 100644 index 000000000..3731653e6 --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/multi.go @@ -0,0 +1,65 @@ +// Copyright 2013, Örjan Persson. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package logging + +// TODO remove Level stuff from the multi logger. Do one thing. + +// multiLogger is a log multiplexer which can be used to utilize multiple log +// backends at once. +type multiLogger struct { + backends []LeveledBackend +} + +// MultiLogger creates a logger which contain multiple loggers. +func MultiLogger(backends ...Backend) LeveledBackend { + var leveledBackends []LeveledBackend + for _, backend := range backends { + leveledBackends = append(leveledBackends, AddModuleLevel(backend)) + } + return &multiLogger{leveledBackends} +} + +// Log passes the log record to all backends. +func (b *multiLogger) Log(level Level, calldepth int, rec *Record) (err error) { + for _, backend := range b.backends { + if backend.IsEnabledFor(level, rec.Module) { + // Shallow copy of the record for the formatted cache on Record and get the + // record formatter from the backend. + r2 := *rec + if e := backend.Log(level, calldepth+1, &r2); e != nil { + err = e + } + } + } + return +} + +// GetLevel returns the highest level enabled by all backends. +func (b *multiLogger) GetLevel(module string) Level { + var level Level + for _, backend := range b.backends { + if backendLevel := backend.GetLevel(module); backendLevel > level { + level = backendLevel + } + } + return level +} + +// SetLevel propagates the same level to all backends. +func (b *multiLogger) SetLevel(level Level, module string) { + for _, backend := range b.backends { + backend.SetLevel(level, module) + } +} + +// IsEnabledFor returns true if any of the backends are enabled for it. +func (b *multiLogger) IsEnabledFor(level Level, module string) bool { + for _, backend := range b.backends { + if backend.IsEnabledFor(level, module) { + return true + } + } + return false +} diff --git a/vendor/gopkg.in/op/go-logging.v1/syslog.go b/vendor/gopkg.in/op/go-logging.v1/syslog.go new file mode 100644 index 000000000..4faa53170 --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/syslog.go @@ -0,0 +1,53 @@ +// Copyright 2013, Örjan Persson. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !windows,!plan9 + +package logging + +import "log/syslog" + +// SyslogBackend is a simple logger to syslog backend. It automatically maps +// the internal log levels to appropriate syslog log levels. 
+type SyslogBackend struct { + Writer *syslog.Writer +} + +// NewSyslogBackend connects to the syslog daemon using UNIX sockets with the +// given prefix. If prefix is not given, the prefix will be derived from the +// launched command. +func NewSyslogBackend(prefix string) (b *SyslogBackend, err error) { + var w *syslog.Writer + w, err = syslog.New(syslog.LOG_CRIT, prefix) + return &SyslogBackend{w}, err +} + +// NewSyslogBackendPriority is the same as NewSyslogBackend, but with custom +// syslog priority, like syslog.LOG_LOCAL3|syslog.LOG_DEBUG etc. +func NewSyslogBackendPriority(prefix string, priority syslog.Priority) (b *SyslogBackend, err error) { + var w *syslog.Writer + w, err = syslog.New(priority, prefix) + return &SyslogBackend{w}, err +} + +// Log implements the Backend interface. +func (b *SyslogBackend) Log(level Level, calldepth int, rec *Record) error { + line := rec.Formatted(calldepth + 1) + switch level { + case CRITICAL: + return b.Writer.Crit(line) + case ERROR: + return b.Writer.Err(line) + case WARNING: + return b.Writer.Warning(line) + case NOTICE: + return b.Writer.Notice(line) + case INFO: + return b.Writer.Info(line) + case DEBUG: + return b.Writer.Debug(line) + default: + } + panic("unhandled log level") +} diff --git a/vendor/gopkg.in/op/go-logging.v1/syslog_fallback.go b/vendor/gopkg.in/op/go-logging.v1/syslog_fallback.go new file mode 100644 index 000000000..91bc18de6 --- /dev/null +++ b/vendor/gopkg.in/op/go-logging.v1/syslog_fallback.go @@ -0,0 +1,28 @@ +// Copyright 2013, Örjan Persson. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build windows plan9 + +package logging + +import ( + "fmt" +) + +type Priority int + +type SyslogBackend struct { +} + +func NewSyslogBackend(prefix string) (b *SyslogBackend, err error) { + return nil, fmt.Errorf("Platform does not support syslog") +} + +func NewSyslogBackendPriority(prefix string, priority Priority) (b *SyslogBackend, err error) { + return nil, fmt.Errorf("Platform does not support syslog") +} + +func (b *SyslogBackend) Log(level Level, calldepth int, rec *Record) error { + return fmt.Errorf("Platform does not support syslog") +} diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml new file mode 100644 index 000000000..1bc5c1cd2 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/.travis.yml @@ -0,0 +1,15 @@ +language: go + +go: + - "1.4" + - "1.5" + - "1.6" + - "1.7" + - "1.8" + - "1.9" + - "1.10" + - "1.11" + - "1.12" + - tip + +go_import_path: gopkg.in/yaml.v3 diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE new file mode 100644 index 000000000..2683e4bb1 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. 
+ +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md new file mode 100644 index 000000000..08eb1babd --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/README.md @@ -0,0 +1,150 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. 
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+   decoded into a typed bool value. Otherwise they behave as a string. Booleans
+   in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+   as specified in YAML 1.2, because most parsers still use the old format.
+   Octals in the _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+   actually never supported by this package as it's clearly a poor choice.
+
+Beyond the points above, anchors, tags, and map merging are all supported.
+Multi-document unmarshalling is not yet implemented.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+    go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+  - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"gopkg.in/yaml.v3"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+	A string
+	B struct {
+		RenamedC int   `yaml:"c"`
+		D        []int `yaml:",flow"`
+	}
+}
+
+func main() {
+	t := T{}
+
+	err := yaml.Unmarshal([]byte(data), &t)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- t:\n%v\n\n", t)
+
+	d, err := yaml.Marshal(&t)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+	m := make(map[interface{}]interface{})
+
+	err = yaml.Unmarshal([]byte(data), &m)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- m:\n%v\n\n", m)
+
+	d, err = yaml.Marshal(&m)
+	if err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go new file mode 100644 index 000000000..65846e674 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -0,0 +1,746 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. 
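+// The "file" may be any io.Reader; the read handler simply delegates to it.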
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. 
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go new file mode 100644 index 000000000..b2e16a6da --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -0,0 +1,902 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
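+// The parser pulls one event at a time from yaml_parser_t and records
+// anchored nodes so that alias events can later be resolved to the nodes
+// they reference. As an illustrative sketch of where the resulting tree
+// surfaces, decoding into a Node keeps the whole tree:
+//
+//	var n Node
+//	_ = Unmarshal([]byte("a: 1"), &n) // n holds the document node tree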
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *Node
+	anchors  map[string]*Node
+	doneInit bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.anchors = make(map[string]*Node)
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+	if anchor != nil {
+		n.Anchor = string(anchor)
+		p.anchors[n.Anchor] = n
+	}
+}
+
+func (p *parser) parse() *Node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	case yaml_TAIL_COMMENT_EVENT:
+		panic("internal error: unexpected tail comment event (please report)")
+	default:
+		panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+	}
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+	var style Style
+	if tag != "" && tag != "!"
{ + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + return &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + Line: p.event.start_mark.line + 1, + Column: p.event.start_mark.column + 1, + HeadComment: string(p.event.head_comment), + LineComment: string(p.event.line_comment), + FootComment: string(p.event.foot_comment), + } +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
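+// (The early return for nulls keeps explicit nulls from allocating the
+// pointer targets they would otherwise dereference.)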
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > 0.99 { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind))) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. + failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. 
+ if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. + isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. 
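+			// resolve() has already left these 1.1-style scalars as plain
+			// strings, so this switch is the only place they become bools.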
+ switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + d.merge(n.Content[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) 
bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i++ { + if n.Content[i].ShortTag() != strTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + d.merge(n.Content[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *Node, out reflect.Value) { + switch n.Kind { + case MappingNode: + d.unmarshal(n, out) + case AliasNode: + if n.Alias != nil && n.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(n, out) + case SequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.Content) - 1; i >= 0; i-- { + ni := n.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" 
|| shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go new file mode 100644 index 000000000..a1383c0a0 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -0,0 +1,1992 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. 
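+// write advances i by each character's UTF-8 width, so the loop below
+// copies whole characters per iteration.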
+func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. 
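+// The previous level is pushed onto emitter.indents so the matching
+// *-END state can pop and restore it.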
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + // [Go] If inside a block sequence item, discount the space taken by the indicator. + if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + emitter.indent -= 2 + } + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
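+// Handling STREAM-START locks in the output encoding and normalizes the
+// indent, width, and line-break settings before the first document.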
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false 
+ } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
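+//
+// The trail flag reports that the previous item already wrote its trailing
+// comma; this is done eagerly when an item carries comments, so the comment
+// text lands after the separator rather than before it.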
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
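+//
+// Keys accepted by yaml_emitter_check_simple_key are emitted inline as
+// "key: value"; anything else is written with the explicit '?' key indicator.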
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
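+//
+// On the first entry the indent is increased (see the [Go] note below on
+// sequences nested inside mappings); SEQUENCE-END pops both the indent and
+// the emitter state.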
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + // [Go] The original logic here would not indent the sequence when inside a mapping. + // In Go we always indent it, but take the sequence indicator out of the indentation. + indentless := emitter.best_indent == 2 && emitter.mapping_context && !emitter.indention + original := emitter.indent + if !yaml_emitter_increase_indent(emitter, false, indentless) { + return false + } + if emitter.indent > original+2 { + emitter.indent -= 2 + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a node. 
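+//
+// Records the current context (root, sequence, mapping, simple key) on the
+// emitter and dispatches on the event type; anything other than ALIAS,
+// SCALAR, SEQUENCE-START, or MAPPING-START is an error at this point.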
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. +func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. 
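+//
+// Mirrors yaml_emitter_check_empty_sequence above: it peeks at the next two
+// queued events and reports a MAPPING-START immediately followed by a
+// MAPPING-END.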
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. +func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. 
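+//
+// A tag with a handle is written in shorthand form (the handle followed by
+// the escaped suffix, e.g. "!!str"); a tag with only a suffix is written
+// verbatim as "!<suffix>".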
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.tail_comment) > 0 {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+			return false
+		}
+		emitter.tail_comment = emitter.tail_comment[:0]
+		emitter.foot_indent = emitter.indent
+		if emitter.foot_indent < 0 {
+			emitter.foot_indent = 0
+		}
+	}
+
+	if len(emitter.head_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+		return false
+	}
+	emitter.head_comment = emitter.head_comment[:0]
+	return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.line_comment) == 0 {
+		return true
+	}
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+		return false
+	}
+	emitter.line_comment = emitter.line_comment[:0]
+	return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+	if len(emitter.foot_comment) == 0 {
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+		return false
+	}
+	emitter.foot_comment = emitter.foot_comment[:0]
+	emitter.foot_indent = emitter.indent
+	if emitter.foot_indent < 0 {
+		emitter.foot_indent = 0
+	}
+	return true
+}
+
+// Check if a %YAML directive is valid.
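+//
+// Only "%YAML 1.1" is accepted; any other version is rejected as an
+// incompatible directive.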
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
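+//
+// Resets the cached anchor, tag, and scalar data, captures any comments
+// carried by the event, and re-validates the anchor, tag, and scalar value
+// as appropriate for the event type.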
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + 
return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go new file mode 100644 index 000000000..38882b50e --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -0,0 +1,542 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) 
fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+	for _, num := range index {
+		for {
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					return reflect.Value{}
+				}
+				v = v.Elem()
+				continue
+			}
+			break
+		}
+		v = v.Field(num)
+	}
+	return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = e.fieldByIndex(in, info.Inline)
+				if !value.IsValid() {
+					continue
+				}
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == binaryTag {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = binaryTag
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
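+		// For example, the plain strings "true" and "12345" would resolve
+		// to !!bool and !!int respectively, and base 60 forms such as
+		// "1:20" would parse as floats under YAML 1.1, so all of these are
+		// emitted quoted to round-trip as strings.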
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == strTag && !isBase60Float(s)
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if they explicitly specify a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		if e.flow {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		} else {
+			style = yaml_LITERAL_SCALAR_STYLE
+		}
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+	implicit := tag == ""
+	if !implicit {
+		tag = longTag(tag)
+	}
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.event.head_comment = head
+	e.event.line_comment = line
+	e.event.foot_comment = foot
+	e.event.tail_comment = tail
+	e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+	e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+	// If the tag was not explicitly requested, and dropping it won't change the
+	// implicit tag of the value, don't include it in the presentation.
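+	// A scalar explicitly tagged !!str whose value would otherwise resolve
+	// to a different tag (for example "123", which resolves to !!int) is
+	// emitted quoted instead, preserving the string type without printing
+	// the tag.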
+	var tag = node.Tag
+	var stag = shortTag(tag)
+	var rtag string
+	var forceQuoting bool
+	if tag != "" && node.Style&TaggedStyle == 0 {
+		if node.Kind == ScalarNode {
+			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+				tag = ""
+			} else {
+				rtag, _ = resolve("", node.Value)
+				if rtag == stag {
+					tag = ""
+				} else if stag == strTag {
+					tag = ""
+					forceQuoting = true
+				}
+			}
+		} else {
+			switch node.Kind {
+			case MappingNode:
+				rtag = mapTag
+			case SequenceNode:
+				rtag = seqTag
+			}
+			if rtag == stag {
+				tag = ""
+			}
+		}
+	}
+
+	switch node.Kind {
+	case DocumentNode:
+		yaml_document_start_event_initialize(&e.event, nil, nil, true)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		yaml_document_end_event_initialize(&e.event, true)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case SequenceNode:
+		style := yaml_BLOCK_SEQUENCE_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_SEQUENCE_STYLE
+		}
+		e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+		for _, node := range node.Content {
+			e.node(node, "")
+		}
+		e.must(yaml_sequence_end_event_initialize(&e.event))
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case MappingNode:
+		style := yaml_BLOCK_MAPPING_STYLE
+		if node.Style&FlowStyle != 0 {
+			style = yaml_FLOW_MAPPING_STYLE
+		}
+		yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)
+		e.event.tail_comment = []byte(tail)
+		e.event.head_comment = []byte(node.HeadComment)
+		e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only after the entirety of the value is streamed. The last tail is
+		// processed with the mapping end event.
+		var tail string
+		for i := 0; i+1 < len(node.Content); i += 2 {
+			k := node.Content[i]
+			foot := k.FootComment
+			k.FootComment = ""
+			e.node(k, tail)
+			tail = foot
+
+			v := node.Content[i+1]
+			e.node(v, "")
+		}
+
+		yaml_mapping_end_event_initialize(&e.event)
+		e.event.tail_comment = []byte(tail)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case AliasNode:
+		yaml_alias_event_initialize(&e.event, []byte(node.Value))
+		e.event.head_comment = []byte(node.HeadComment)
+		e.event.line_comment = []byte(node.LineComment)
+		e.event.foot_comment = []byte(node.FootComment)
+		e.emit()
+
+	case ScalarNode:
+		value := node.Value
+		if !utf8.ValidString(value) {
+			if tag == binaryTag {
+				failf("explicitly tagged !!binary data must be base64-encoded")
+			}
+			if tag != "" {
+				failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+			}
+			// It can't be encoded directly as YAML so use a binary tag
+			// and encode it as base64.
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + } +} diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod new file mode 100644 index 000000000..f407ea321 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v3" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go new file mode 100644 index 000000000..ec25faabc --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -0,0 +1,1210 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? 
+// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
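+//
+// Each parser state corresponds to a position in the grammar above, and the
+// switch below simply dispatches to the parse function for the production
+// expected in that state.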
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ var head_comment []byte
+ if len(parser.head_comment) > 0 {
+ // [Go] Scan the header comment backwards, and if an empty line is found, break
+ // the header so the part before the last empty line goes into the
+ // document header, while the bottom of it goes into a follow up event.
+ for i := len(parser.head_comment) - 1; i > 0; i-- {
+ if parser.head_comment[i] == '\n' {
+ if i == len(parser.head_comment)-1 {
+ head_comment = parser.head_comment[:i]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ } else if parser.head_comment[i-1] == '\n' {
+ head_comment = parser.head_comment[:i-1]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ }
+ }
+ }
+ }
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+
+ head_comment: head_comment,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
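+ // (Only the stream end remains, so emit STREAM-END and switch to the end state.)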
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + 
yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. 
Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. 
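+ // The copy duplicates the handle and prefix bytes so the stored directive
+ // does not alias the scanner's token buffers.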
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go
new file mode 100644
index 000000000..b7de0a89c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we have enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
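+ // (The buffer counts as full when nothing has been consumed yet and it is
+ // already at capacity.)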
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above implies
+ // that this is the case, and there are tests that depend on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length is either panicking (in the Go port) or accessing
+ // invalid memory (in the original C).
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. 
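+ // (A single UTF-16 code unit is two bytes; surrogate pairs are handled below.)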
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length is either panicking (in the Go port) or accessing
+ // invalid memory (in the original C). This happens here due to the EOF
+ // above breaking early.
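+ // Pad with NUL bytes up to the requested length so callers can index the
+ // buffer safely.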
+ for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go new file mode 100644 index 000000000..64ae88805 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" 
+ tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ if ltag, ok := longTags[tag]; ok {
+ return ltag
+ }
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, strTag, binaryTag:
+ return
+ case floatTag:
+ if rtag == intTag {
+ switch v := out.(type) {
+ case int64:
+ rtag = floatTag
+ out = float64(v)
+ return
+ case int:
+ rtag = floatTag
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != strTag && tag != binaryTag {
+ // Handle things we can look up in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == timestampTag {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return timestampTag, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ // Octals as introduced in version 1.2 of the spec.
+ // Octals from the 1.1 spec, spelled as 0777, are still
+ // decoded by default in v3 as well for compatibility.
+ // May be dropped in v4 depending on how usage evolves.
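+ // For example, both "0o14" (YAML 1.2) and "014" (YAML 1.1) decode to the
+ // integer 12; the 1.1 form is already accepted by the base-0 ParseInt call
+ // above.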
+ if strings.HasPrefix(plain, "0o") {
+ intv, err := strconv.ParseInt(plain[2:], 8, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0o") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ default:
+ panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return strTag, in
+}
+
+// encodeBase64 encodes s as base64, broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RFC3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
new file mode 100644
index 000000000..4a9c65646
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
@@ -0,0 +1,2974 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or, as it
+// is usually called, an LL(1) parser).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. They are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !      !foo
+//      %TAG    !yaml! tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. 
Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends
+// a block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntax peculiarities that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ? complex key
+//            : complex value
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("complex key",plain)
+//          VALUE
+//          SCALAR("complex value",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Collections in a mapping:
+//
+//          ? 
a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) 
+ parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // Check if we really need to fetch more tokens. + need_more_tokens := false + + // [Go] The comment parsing logic requires a lookahead of one token + // in block style or two tokens in flow style so that the foot + // comments may be parsed in time of associating them with the tokens + // that are parsed before them. + if parser.tokens_head >= len(parser.tokens)-1 || parser.flow_level > 0 && parser.tokens_head >= len(parser.tokens)-2 { + need_more_tokens = true + } else { + // Check if any potential simple key may occupy the head position. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + if simple_key.possible && simple_key.token_number == parser.tokens_parsed { + need_more_tokens = true + break + } + } + } + + // We are finished. + if !need_more_tokens { + break + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. 
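+	// [Go] In this port the same pass also records full-line comments (see
+	// yaml_parser_scan_comments); scan_mark taken above is reused further
+	// down to reposition BLOCK-END tokens around foot comments.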
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Remove obsolete potential simple keys. + if !yaml_parser_stale_simple_keys(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? 
+ if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +// Check the list of potential simple keys and remove the positions that +// cannot contain simple keys anymore. +func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { + // Check for a potential simple key for each flow level. + for i := range parser.simple_keys { + simple_key := &parser.simple_keys[i] + + // The specification requires that a simple key + // + // - is limited to a single line, + // - is shorter than 1024 characters. 
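+		//
+		// Both restrictions map onto the check below: once the scanner has
+		// moved to a later line, or more than 1024 index positions past the
+		// key's mark, the saved position can no longer start a simple key.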
+		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+			// Check if the potential simple key to be removed is required.
+			if simple_key.required {
+				return yaml_parser_set_scanner_error(parser,
+					"while scanning a simple key", simple_key.mark,
+					"could not find expected ':'")
+			}
+			simple_key.possible = false
+		}
+	}
+	return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		}
+		simple_key.mark = parser.mark
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+ if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. 
+ yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. 
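+		// (yaml_parser_roll_indent only emits it when this '-' sits at a
+		// column deeper than the current indentation level.)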
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it because the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. 
+ // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. 
+ if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. 
+ if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. 
+ */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. 
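+// URI characters are copied verbatim, except '%', which must introduce a
+// valid URI-escape sequence (decoded by yaml_parser_scan_uri_escapes below).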
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. 
+ *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_line_comment(parser, start_mark) { + return false + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. 
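+	//
+	// (Editor's note, for orientation: given a folded scalar such as
+	//
+	//     key: >
+	//       one
+	//       two
+	//
+	// the single break between "one" and "two" is folded into a space by the
+	// loop below, while the literal style "|" keeps every break verbatim.)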
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. 
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
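+					// (Editor's note: e.g. "\u00E9" scans to the value 0xE9
+					// here and is re-encoded below as the UTF-8 bytes
+					// 0xC3 0xA9 for 'é'.)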
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan a plain scalar.
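+//
+// (Editor's note, for orientation: a plain, unquoted, scalar ends at a ":"
+// followed by whitespace, at a "#" comment, at a document indicator in
+// column 0, or, in flow context, at one of ",?[]{}". These are exactly the
+// checks performed in the loop below.)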
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. 
+ if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. + if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + parser.comments = append(parser.comments, yaml_comment_t{token_mark: token_mark}) + comment := &parser.comments[len(parser.comments)-1].line + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + if len(*comment) > 0 { + *comment = append(*comment, '\n') + } + + // Consume until after the consumed comment line. + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else { + if parser.mark.index >= seen { + *comment = append(*comment, parser.buffer[parser.buffer_pos]) + } + skip(parser) + } + } + } + break + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') { + // Got line break or terminator. + if !recent_empty { + if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + if len(text) > 0 { + if start_mark.column-1 < parser.indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && column < parser.indent+1 && column != start_mark.column { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else { + if parser.mark.index >= seen { + text = append(text, parser.buffer[parser.buffer_pos]) + } + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go new file mode 100644 index 000000000..9210ece7e --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
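+
+// (Editor's note, not part of the upstream sources: this file implements the
+// digit-aware "natural" ordering used to sort map keys during encoding, so
+// that e.g. the key "item2" sorts before "item10".)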
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go new file mode 100644 index 000000000..b8a116bf9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go new file mode 100644 index 000000000..b5d35a50d --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -0,0 +1,662 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+	"unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+	UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	parser      *parser
+	knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+	dec.knownFields = enable
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
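+//
+// A minimal usage sketch (editor's illustration; r stands for any io.Reader
+// supplying one or more YAML documents):
+//
+//     dec := yaml.NewDecoder(r)
+//     for {
+//         var doc map[string]interface{}
+//         if err := dec.Decode(&doc); err == io.EOF {
+//             break
+//         } else if err != nil {
+//             return err
+//         }
+//         // ... use doc ...
+//     }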
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	d.knownFields = dec.knownFields
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+	d := newDecoder()
+	defer handleErr(&err)
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(n, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be included if that method returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
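+//
+// A minimal usage sketch (editor's illustration):
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     if err := enc.Encode(v); err != nil {
+//         return err
+//     }
+//     if err := enc.Close(); err != nil {
+//         return err
+//     }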
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+	if spaces < 0 {
+		panic("yaml: cannot indent to a negative number of spaces")
+	}
+	e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+type Kind uint32
+
+const (
+	DocumentNode Kind = 1 << iota
+	SequenceNode
+	MappingNode
+	ScalarNode
+	AliasNode
+)
+
+type Style uint32
+
+const (
+	TaggedStyle Style = 1 << iota
+	DoubleQuotedStyle
+	SingleQuotedStyle
+	LiteralStyle
+	FoldedStyle
+	FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+//     var person struct {
+//         Name    string
+//         Address yaml.Node
+//     }
+//     err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+//     var person Node
+//     err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+	// Kind defines whether the node is a document, a mapping, a sequence,
+	// a scalar value, or an alias to another node. The specific data type of
+	// scalar nodes may be obtained via the ShortTag and LongTag methods.
+	Kind Kind
+
+	// Style allows customizing the appearance of the node in the tree.
+	Style Style
+
+	// Tag holds the YAML tag defining the data type for the value.
+	// When decoding, this field will always be set to the resolved tag,
+	// even when it wasn't explicitly provided in the YAML content.
+	// When encoding, if this field is unset the value type will be
+	// implied from the node properties, and if it is set, it will only
+	// be serialized into the representation if TaggedStyle is used or
+	// the implicit tag diverges from the provided one.
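+	// For example (editor's note), decoding the plain scalar 123 resolves
+	// Tag to "!!int", while decoding the quoted scalar "123" resolves it
+	// to "!!str".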
+	Tag string
+
+	// Value holds the unescaped and unquoted representation of the value.
+	Value string
+
+	// Anchor holds the anchor name for this node, which allows aliases to point to it.
+	Anchor string
+
+	// Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+	Alias *Node
+
+	// Content holds contained nodes for documents, mappings, and sequences.
+	Content []*Node
+
+	// HeadComment holds any comments in the lines preceding the node and
+	// not separated by an empty line.
+	HeadComment string
+
+	// LineComment holds any comments at the end of the line where the node is in.
+	LineComment string
+
+	// FootComment holds any comments following the node and before empty lines.
+	FootComment string
+
+	// Line and Column hold the node position in the decoded YAML text.
+	// These fields are not respected when encoding the node.
+	Line   int
+	Column int
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+	return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+	if n.indicatedString() {
+		return strTag
+	}
+	if n.Tag == "" || n.Tag == "!" {
+		switch n.Kind {
+		case MappingNode:
+			return mapTag
+		case SequenceNode:
+			return seqTag
+		case AliasNode:
+			if n.Alias != nil {
+				return n.Alias.ShortTag()
+			}
+		case ScalarNode:
+			tag, _ := resolve("", n.Value)
+			return tag
+		}
+		return ""
+	}
+	return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+	return n.Kind == ScalarNode &&
+		(shortTag(n.Tag) == strTag ||
+			(n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+	n.Kind = ScalarNode
+	if utf8.ValidString(s) {
+		n.Value = s
+		n.Tag = strTag
+	} else {
+		n.Value = encodeBase64(s)
+		n.Tag = binaryTag
+	}
+	if strings.Contains(n.Value, "\n") {
+		n.Style = LiteralStyle
+	}
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+
+	// InlineUnmarshalers holds indexes to inlined fields that
+	// contain unmarshaler values.
+	InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
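+//
+// For example (editor's sketch, not part of the upstream sources), a type
+// can opt into ",omitempty" semantics for struct values:
+//
+//     type Window struct{ Min, Max int }
+//
+//     func (w Window) IsZero() bool { return w.Min == 0 && w.Max == 0 }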
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go new file mode 100644 index 000000000..65fb0df3b --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -0,0 +1,803 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. 
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+	yaml_PLAIN_SCALAR_STYLE         yaml_scalar_style_t = 1 << iota // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE                                 // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE                                 // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE                                       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE                                        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+ yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. 
+ yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
+	yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case
yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. 
+} + +type yaml_comment_t struct { + + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? 
+	best_indent int // The number of indentation spaces.
+	best_width int // The preferred width of the output lines.
+	unicode bool // Allow unescaped non-ASCII characters?
+	line_break yaml_break_t // The preferred line break.
+
+	state yaml_emitter_state_t // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events []yaml_event_t // The event queue.
+	events_head int // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context bool // Is it the document root context?
+	sequence_context bool // Is it a sequence context?
+	mapping_context bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line int // The current line.
+	column int // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	space_above bool // Is there an empty line above?
+	foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+	// Anchor analysis.
+	anchor_data struct {
+	anchor []byte // The anchor value.
+	alias bool // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+	handle []byte // The tag handle.
+	suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+	value []byte // The scalar value.
+	multiline bool // Does the scalar contain line breaks?
+	flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
+	block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+	single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+	block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+	style yaml_scalar_style_t // The output style.
+	}
+
+	// Comments
+	head_comment []byte
+	line_comment []byte
+	foot_comment []byte
+	tail_comment []byte
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+	references int // The number of references.
+	anchor int // The anchor id.
+	serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 000000000..e88f9c54a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. 
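+// YAML allows multi-byte breaks (NEL, LS, PS), so this check may read up to
+// two bytes beyond i; callers must ensure enough lookahead is buffered.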
+func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. 
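+	// UTF-8 lead bytes encode the total sequence length:
+	// 0xxxxxxx is 1 byte, 110xxxxx is 2, 1110xxxx is 3, and 11110xxx is 4;
+	// continuation or invalid lead bytes fall through and report width 0.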
+ if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a90475b12..7835f8a4f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -8,8 +8,12 @@ github.com/asaskevich/govalidator github.com/blang/semver # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew +# github.com/fatih/color v1.7.0 +github.com/fatih/color # github.com/ghodss/yaml v1.0.0 github.com/ghodss/yaml +# github.com/go-bindata/go-bindata/v3 v3.1.3 +github.com/go-bindata/go-bindata/v3 # github.com/go-openapi/analysis v0.19.5 github.com/go-openapi/analysis github.com/go-openapi/analysis/internal @@ -33,13 +37,15 @@ github.com/go-openapi/swag github.com/go-openapi/validate # github.com/go-stack/stack v1.8.0 github.com/go-stack/stack +# github.com/gobuffalo/flect v0.2.0 +github.com/gobuffalo/flect # github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys # github.com/golang-migrate/migrate/v4 v4.6.2 github.com/golang-migrate/migrate/v4/source github.com/golang-migrate/migrate/v4/source/file -# github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 +# github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef github.com/golang/groupcache/lru # github.com/golang/protobuf v1.3.2 github.com/golang/protobuf/proto @@ -49,7 +55,7 @@ github.com/golang/protobuf/ptypes/duration github.com/golang/protobuf/ptypes/timestamp # github.com/google/gofuzz v1.0.0 github.com/google/gofuzz -# github.com/googleapis/gnostic v0.2.0 +# github.com/googleapis/gnostic v0.3.1 github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions @@ -62,14 +68,25 @@ github.com/imdario/mergo github.com/inconshreveable/mousetrap # github.com/json-iterator/go v1.1.8 github.com/json-iterator/go +# github.com/kisielk/errcheck v1.2.0 +github.com/kisielk/errcheck +github.com/kisielk/errcheck/internal/errcheck # github.com/konsorten/go-windows-terminal-sequences v1.0.2 github.com/konsorten/go-windows-terminal-sequences # github.com/mailru/easyjson v0.7.0 github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter +# github.com/mattn/go-colorable v0.1.2 +github.com/mattn/go-colorable +# github.com/mattn/go-isatty v0.0.8 +github.com/mattn/go-isatty # github.com/mattn/go-sqlite3 v1.10.0 github.com/mattn/go-sqlite3 +# github.com/mikefarah/yaml/v2 v2.4.0 +github.com/mikefarah/yaml/v2 +# github.com/mikefarah/yq/v2 v2.4.1 +github.com/mikefarah/yq/v2 # github.com/mitchellh/mapstructure v1.1.2 github.com/mitchellh/mapstructure # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd @@ -102,8 +119,11 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 +# golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 golang.org/x/crypto/ssh/terminal +# golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f +golang.org/x/lint +golang.org/x/lint/golint # golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -116,7 +136,7 @@ golang.org/x/net/trace # golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 golang.org/x/oauth2 golang.org/x/oauth2/internal -# golang.org/x/sys 
v0.0.0-20190826190057-c7b8b68b1456 +# golang.org/x/sys v0.0.0-20190922100055-0a153f010e69 golang.org/x/sys/unix golang.org/x/sys/windows # golang.org/x/text v0.3.2 @@ -127,6 +147,16 @@ golang.org/x/text/unicode/norm golang.org/x/text/width # golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 golang.org/x/time/rate +# golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f +golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/gcexportdata +golang.org/x/tools/go/internal/gcimporter +golang.org/x/tools/go/internal/packagesdriver +golang.org/x/tools/go/packages +golang.org/x/tools/internal/fastwalk +golang.org/x/tools/internal/gopathwalk +golang.org/x/tools/internal/semver +golang.org/x/tools/internal/span # google.golang.org/appengine v1.5.0 google.golang.org/appengine/internal google.golang.org/appengine/internal/base @@ -171,10 +201,16 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap +# gopkg.in/imdario/mergo.v0 v0.3.7 +gopkg.in/imdario/mergo.v0 # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 +# gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473 +gopkg.in/op/go-logging.v1 # gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 +# gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966 +gopkg.in/yaml.v3 # k8s.io/api v0.17.3 k8s.io/api/admissionregistration/v1 k8s.io/api/admissionregistration/v1beta1 @@ -308,5 +344,22 @@ k8s.io/kube-openapi/pkg/util/proto # k8s.io/utils v0.0.0-20191114184206-e782cd3c129f k8s.io/utils/integer k8s.io/utils/pointer +# sigs.k8s.io/controller-runtime v0.5.2 +sigs.k8s.io/controller-runtime/pkg/scheme +# sigs.k8s.io/controller-tools v0.2.8 +sigs.k8s.io/controller-tools/cmd/controller-gen +sigs.k8s.io/controller-tools/pkg/crd +sigs.k8s.io/controller-tools/pkg/crd/markers +sigs.k8s.io/controller-tools/pkg/deepcopy +sigs.k8s.io/controller-tools/pkg/genall +sigs.k8s.io/controller-tools/pkg/genall/help +sigs.k8s.io/controller-tools/pkg/genall/help/pretty +sigs.k8s.io/controller-tools/pkg/loader +sigs.k8s.io/controller-tools/pkg/markers +sigs.k8s.io/controller-tools/pkg/rbac +sigs.k8s.io/controller-tools/pkg/schemapatcher +sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml +sigs.k8s.io/controller-tools/pkg/version +sigs.k8s.io/controller-tools/pkg/webhook # sigs.k8s.io/yaml v1.1.0 sigs.k8s.io/yaml diff --git a/vendor/sigs.k8s.io/controller-runtime/LICENSE b/vendor/sigs.k8s.io/controller-runtime/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go new file mode 100644 index 000000000..cd6031967 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go @@ -0,0 +1,94 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package scheme contains utilities for gradually building Schemes,
+// which contain information associating Go types with Kubernetes
+// groups, versions, and kinds.
+//
+// Each API group should define a utility function
+// called AddToScheme for adding its types to a Scheme:
+//
+// // in package myapigroupv1...
+// var (
+// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
+// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+// AddToScheme = SchemeBuilder.AddToScheme
+// )
+//
+// func init() {
+// SchemeBuilder.Register(&MyType{}, &MyTypeList{})
+// }
+//
+// This is also true of the built-in Kubernetes types. Then, in the entrypoint for
+// your manager, assemble the scheme containing exactly the types you need.
+// For instance, if our controller needs types from the core/v1 API group (e.g. Pod),
+// plus types from my.api.group/v1:
+//
+// var (
+// scheme *runtime.Scheme = runtime.NewScheme()
+// )
+//
+// func init() {
+// myapigroupv1.AddToScheme(scheme)
+// kubernetesscheme.AddToScheme(scheme)
+// }
+//
+// func main() {
+// mgr := controllers.NewManager(controllers.GetConfigOrDie(), manager.Options{
+// Scheme: scheme,
+// })
+// // ...
+// }
+//
+package scheme
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds.
+type Builder struct {
+ GroupVersion schema.GroupVersion
+ runtime.SchemeBuilder
+}
+
+// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld.
+func (bld *Builder) Register(object ...runtime.Object) *Builder {
+ bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(bld.GroupVersion, object...)
+ metav1.AddToGroupVersion(scheme, bld.GroupVersion)
+ return nil
+ })
+ return bld
+}
+
+// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld.
+func (bld *Builder) RegisterAll(b *Builder) *Builder {
+ bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...)
+ return bld
+}
+
+// AddToScheme adds all registered types to s.
+func (bld *Builder) AddToScheme(s *runtime.Scheme) error {
+ return bld.SchemeBuilder.AddToScheme(s)
+}
+
+// Build returns a new Scheme containing the registered types.
+func (bld *Builder) Build() (*runtime.Scheme, error) {
+ s := runtime.NewScheme()
+ return s, bld.AddToScheme(s)
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/LICENSE b/vendor/sigs.k8s.io/controller-tools/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
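For orientation, the scheme.Builder type vendored above is what an API group package would use to expose AddToScheme. A minimal sketch of that pattern, assuming a hypothetical myv1 package and MyType kind (neither is part of this change; a real type would be code-generated):

    package myv1

    import (
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    	"sigs.k8s.io/controller-runtime/pkg/scheme"
    )

    // MyType is a stand-in API type for illustration only.
    type MyType struct {
    	metav1.TypeMeta   `json:",inline"`
    	metav1.ObjectMeta `json:"metadata,omitempty"`
    }

    // DeepCopyObject is a minimal stub so MyType satisfies runtime.Object;
    // `controller-gen object` would normally generate a proper deep copy.
    func (in *MyType) DeepCopyObject() runtime.Object {
    	out := *in
    	return &out
    }

    var (
    	GroupVersion  = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
    	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
    	AddToScheme   = SchemeBuilder.AddToScheme
    )

    func init() {
    	SchemeBuilder.Register(&MyType{})
    }

Build() can then hand back a fresh *runtime.Scheme for tests, while AddToScheme composes the group's types into a manager's scheme.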
diff --git a/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go b/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go
new file mode 100644
index 000000000..692b26c8d
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go
@@ -0,0 +1,263 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package main
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "sigs.k8s.io/controller-tools/pkg/crd"
+ "sigs.k8s.io/controller-tools/pkg/deepcopy"
+ "sigs.k8s.io/controller-tools/pkg/genall"
+ "sigs.k8s.io/controller-tools/pkg/genall/help"
+ prettyhelp "sigs.k8s.io/controller-tools/pkg/genall/help/pretty"
+ "sigs.k8s.io/controller-tools/pkg/markers"
+ "sigs.k8s.io/controller-tools/pkg/rbac"
+ "sigs.k8s.io/controller-tools/pkg/schemapatcher"
+ "sigs.k8s.io/controller-tools/pkg/version"
+ "sigs.k8s.io/controller-tools/pkg/webhook"
+)
+
+//go:generate go run ../helpgen/main.go paths=../../pkg/... generate:headerFile=../../boilerplate.go.txt,year=2019
+
+// Options are specified to controller-gen by turning generators and output rules into
+// markers, and then parsing them using the standard registry logic (without the "+").
+// Each marker and output rule should thus be usable as a marker target.
+
+var (
+ // allGenerators maintains the list of all known generators, giving
+ // them names for use on the command line.
+ // Each turns into a command line option,
+ // and has options for output forms.
+ allGenerators = map[string]genall.Generator{
+ "crd": crd.Generator{},
+ "rbac": rbac.Generator{},
+ "object": deepcopy.Generator{},
+ "webhook": webhook.Generator{},
+ "schemapatch": schemapatcher.Generator{},
+ }
+
+ // allOutputRules defines the list of all known output rules, giving
+ // them names for use on the command line.
+ // Each output rule turns into two command line options:
+ // - output:<generator>:<rule> (per-generator output)
+ // - output:<rule> (default output)
+ allOutputRules = map[string]genall.OutputRule{
+ "dir": genall.OutputToDirectory(""),
+ "none": genall.OutputToNothing,
+ "stdout": genall.OutputToStdout,
+ "artifacts": genall.OutputArtifacts{},
+ }
+
+ // optionsRegistry contains all the marker definitions used to process command line options
+ optionsRegistry = &markers.Registry{}
+)
+
+func init() {
+ for genName, gen := range allGenerators {
+ // make the generator options marker itself
+ defn := markers.Must(markers.MakeDefinition(genName, markers.DescribesPackage, gen))
+ if err := optionsRegistry.Register(defn); err != nil {
+ panic(err)
+ }
+ if helpGiver, hasHelp := gen.(genall.HasHelp); hasHelp {
+ if help := helpGiver.Help(); help != nil {
+ optionsRegistry.AddHelp(defn, help)
+ }
+ }
+
+ // make per-generation output rule markers
+ for ruleName, rule := range allOutputRules {
+ ruleMarker := markers.Must(markers.MakeDefinition(fmt.Sprintf("output:%s:%s", genName, ruleName), markers.DescribesPackage, rule))
+ if err := optionsRegistry.Register(ruleMarker); err != nil {
+ panic(err)
+ }
+ if helpGiver, hasHelp := rule.(genall.HasHelp); hasHelp {
+ if help := helpGiver.Help(); help != nil {
+ optionsRegistry.AddHelp(ruleMarker, help)
+ }
+ }
+ }
+ }
+
+ // make "default output" output rule markers
+ for ruleName, rule := range allOutputRules {
+ ruleMarker := markers.Must(markers.MakeDefinition("output:"+ruleName, markers.DescribesPackage, rule))
+ if err := optionsRegistry.Register(ruleMarker); err != nil {
+ panic(err)
+ }
+ if helpGiver, hasHelp := rule.(genall.HasHelp); hasHelp {
+ if help := helpGiver.Help(); help != nil {
+ optionsRegistry.AddHelp(ruleMarker, help)
+ }
+ }
+ }
+
+ // add in the common options markers
+ if err := genall.RegisterOptionsMarkers(optionsRegistry); err != nil {
+ panic(err)
+ }
+}
+
+// noUsageError suppresses usage printing when it occurs
+// (since cobra doesn't provide a good way to avoid printing
+// out usage in only certain situations).
+type noUsageError struct{ error }
+
+func main() {
+ helpLevel := 0
+ whichLevel := 0
+ showVersion := false
+
+ cmd := &cobra.Command{
+ Use: "controller-gen",
+ Short: "Generate Kubernetes API extension resources and code.",
+ Long: "Generate Kubernetes API extension resources and code.",
+ Example: ` # Generate RBAC manifests and crds for all types under apis/,
+ # outputting crds to /tmp/crds and everything else to stdout
+ controller-gen rbac:roleName=<role name> crd paths=./apis/... output:crd:dir=/tmp/crds output:stdout
+
+ # Generate deepcopy/runtime.Object implementations for a particular file
+ controller-gen object paths=./apis/v1beta1/some_types.go
+
+ # Generate OpenAPI v3 schemas for API packages and merge them into existing CRD manifests
+ controller-gen schemapatch:manifests=./manifests output:dir=./manifests paths=./pkg/apis/...
+
+ # Run all the generators for a given project
+ controller-gen paths=./apis/...
+ + # Explain the markers for generating CRDs, and their arguments + controller-gen crd -ww +`, + RunE: func(c *cobra.Command, rawOpts []string) error { + // print version if asked for it + if showVersion { + version.Print() + return nil + } + + // print the help if we asked for it (since we've got a different help flag :-/), then bail + if helpLevel > 0 { + return c.Usage() + } + + // print the marker docs if we asked for them, then bail + if whichLevel > 0 { + return printMarkerDocs(c, rawOpts, whichLevel) + } + + // otherwise, set up the runtime for actually running the generators + rt, err := genall.FromOptions(optionsRegistry, rawOpts) + if err != nil { + return err + } + if len(rt.Generators) == 0 { + return fmt.Errorf("no generators specified") + } + + if hadErrs := rt.Run(); hadErrs { + // don't obscure the actual error with a bunch of usage + return noUsageError{fmt.Errorf("not all generators ran successfully")} + } + return nil + }, + SilenceUsage: true, // silence the usage, then print it out ourselves if it wasn't suppressed + } + cmd.Flags().CountVarP(&whichLevel, "which-markers", "w", "print out all markers available with the requested generators\n(up to -www for the most detailed output, or -wwww for json output)") + cmd.Flags().CountVarP(&helpLevel, "detailed-help", "h", "print out more detailed help\n(up to -hhh for the most detailed output, or -hhhh for json output)") + cmd.Flags().BoolVar(&showVersion, "version", false, "show version") + cmd.Flags().Bool("help", false, "print out usage and a summary of options") + oldUsage := cmd.UsageFunc() + cmd.SetUsageFunc(func(c *cobra.Command) error { + if err := oldUsage(c); err != nil { + return err + } + if helpLevel == 0 { + helpLevel = summaryHelp + } + fmt.Fprintf(c.OutOrStderr(), "\n\nOptions\n\n") + return helpForLevels(c.OutOrStdout(), c.OutOrStderr(), helpLevel, optionsRegistry, help.SortByOption) + }) + + if err := cmd.Execute(); err != nil { + if _, noUsage := err.(noUsageError); !noUsage { + // print the usage unless we suppressed it + if err := cmd.Usage(); err != nil { + panic(err) + } + } + fmt.Fprintf(cmd.OutOrStderr(), "run `%[1]s %[2]s -w` to see all available markers, or `%[1]s %[2]s -h` for usage\n", cmd.CalledAs(), strings.Join(os.Args[1:], " ")) + os.Exit(1) + } +} + +// printMarkerDocs prints out marker help for the given generators specified in +// the rawOptions, at the given level. +func printMarkerDocs(c *cobra.Command, rawOptions []string, whichLevel int) error { + // just grab a registry so we don't lag while trying to load roots + // (like we'd do if we just constructed the full runtime). 
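	// NB(editor): an illustrative command line, not part of the vendored
	// upstream source; the paths are hypothetical. The raw options parsed
	// against optionsRegistry here (and in RunE above) look like marker
	// targets without the leading "+", e.g.:
	//
	//	controller-gen crd:trivialVersions=true,crdVersions=v1 \
	//	    paths=./pkg/apis/... \
	//	    output:crd:dir=./deploy/crds output:stdout
	//
	// where "crd:trivialVersions=true" parses against the "crd" generator
	// marker registered in init(), "output:crd:dir" picks the "dir" output
	// rule for the crd generator only, and "output:stdout" sets the default
	// output rule for everything else.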
+ reg, err := genall.RegistryFromOptions(optionsRegistry, rawOptions) + if err != nil { + return err + } + + return helpForLevels(c.OutOrStdout(), c.OutOrStderr(), whichLevel, reg, help.SortByCategory) +} + +func helpForLevels(mainOut io.Writer, errOut io.Writer, whichLevel int, reg *markers.Registry, sorter help.SortGroup) error { + helpInfo := help.ByCategory(reg, sorter) + switch whichLevel { + case jsonHelp: + if err := json.NewEncoder(mainOut).Encode(helpInfo); err != nil { + return err + } + case detailedHelp, fullHelp: + fullDetail := whichLevel == fullHelp + for _, cat := range helpInfo { + if cat.Category == "" { + continue + } + contents := prettyhelp.MarkersDetails(fullDetail, cat.Category, cat.Markers) + if err := contents.WriteTo(errOut); err != nil { + return err + } + } + case summaryHelp: + for _, cat := range helpInfo { + if cat.Category == "" { + continue + } + contents := prettyhelp.MarkersSummary(cat.Category, cat.Markers) + if err := contents.WriteTo(errOut); err != nil { + return err + } + } + } + return nil +} + +const ( + _ = iota + summaryHelp + detailedHelp + fullHelp + jsonHelp +) diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/conv.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/conv.go new file mode 100644 index 000000000..e429db5cc --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/conv.go @@ -0,0 +1,122 @@ +package crd + +import ( + "fmt" + + apiextinternal "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + conversionScheme = runtime.NewScheme() +) + +func init() { + if err := apiextinternal.AddToScheme(conversionScheme); err != nil { + panic("must be able to add internal apiextensions to the CRD conversion Scheme") + } + if err := apiext.AddToScheme(conversionScheme); err != nil { + panic("must be able to add apiextensions/v1 to the CRD conversion Scheme") + } + if err := apiextv1beta1.AddToScheme(conversionScheme); err != nil { + panic("must be able to add apiextensions/v1beta1 to the CRD conversion Scheme") + } +} + +// AsVersion converts a CRD from the canonical internal form (currently v1) to some external form. +func AsVersion(original apiext.CustomResourceDefinition, gv schema.GroupVersion) (runtime.Object, error) { + // We can use the internal versions an existing conversions from kubernetes, since they're not in k/k itself. + // This punts the problem of conversion down the road for a future maintainer (or future instance of @directxman12) + // when we have to support older versions that get removed, or when API machinery decides to yell at us for this + // questionable decision. + intVer, err := conversionScheme.ConvertToVersion(&original, apiextinternal.SchemeGroupVersion) + if err != nil { + return nil, fmt.Errorf("unable to convert to internal CRD version: %w", err) + } + + return conversionScheme.ConvertToVersion(intVer, gv) +} + +// mergeIdenticalSubresources checks to see if subresources are identical across +// all versions, and if so, merges them into a top-level version. +// +// This assumes you're not using trivial versions. 
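// NB(editor): a minimal usage sketch for AsVersion above, not part of the
// vendored upstream source; error handling elided:
//
//	obj, err := AsVersion(v1CRD, apiextv1beta1.SchemeGroupVersion)
//	legacy := obj.(*apiextv1beta1.CustomResourceDefinition)
//
// The conversion always round-trips through the internal version, so any
// external version registered on conversionScheme in init is reachable.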
+func mergeIdenticalSubresources(crd *apiextv1beta1.CustomResourceDefinition) {
+	subres := crd.Spec.Versions[0].Subresources
+	for _, ver := range crd.Spec.Versions {
+		if ver.Subresources == nil || !equality.Semantic.DeepEqual(subres, ver.Subresources) {
+			// either all nil, or not identical
+			return
+		}
+	}
+
+	// things are identical if we've gotten this far, so move the subresources up
+	// and discard the identical per-version ones
+	crd.Spec.Subresources = subres
+	for i := range crd.Spec.Versions {
+		crd.Spec.Versions[i].Subresources = nil
+	}
+}
+
+// mergeIdenticalSchemata checks to see if schemata are identical across
+// all versions, and if so, merges them into a top-level version.
+//
+// This assumes you're not using trivial versions.
+func mergeIdenticalSchemata(crd *apiextv1beta1.CustomResourceDefinition) {
+	schema := crd.Spec.Versions[0].Schema
+	for _, ver := range crd.Spec.Versions {
+		if ver.Schema == nil || !equality.Semantic.DeepEqual(schema, ver.Schema) {
+			// either all nil, or not identical
+			return
+		}
+	}
+
+	// things are identical if we've gotten this far, so move the schemata up
+	// to a single schema and discard the identical per-version ones
+	crd.Spec.Validation = schema
+	for i := range crd.Spec.Versions {
+		crd.Spec.Versions[i].Schema = nil
+	}
+}
+
+// mergeIdenticalPrinterColumns checks to see if additional printer columns
+// are identical across all versions, and if so, merges them into a top-level
+// version.
+//
+// This assumes you're not using trivial versions.
+func mergeIdenticalPrinterColumns(crd *apiextv1beta1.CustomResourceDefinition) {
+	cols := crd.Spec.Versions[0].AdditionalPrinterColumns
+	for _, ver := range crd.Spec.Versions {
+		if len(ver.AdditionalPrinterColumns) == 0 || !equality.Semantic.DeepEqual(cols, ver.AdditionalPrinterColumns) {
+			// either all nil, or not identical
+			return
+		}
+	}
+
+	// things are identical if we've gotten this far, so move the printer columns up
+	// and discard the identical per-version ones
+	crd.Spec.AdditionalPrinterColumns = cols
+	for i := range crd.Spec.Versions {
+		crd.Spec.Versions[i].AdditionalPrinterColumns = nil
+	}
+}
+
+// MergeIdenticalVersionInfo makes sure that components of the Versions field that are identical
+// across all versions get merged into the top-level fields in v1beta1.
+//
+// This is required by the Kubernetes API server validation.
+//
+// The reason is that a v1beta1 -> v1 -> v1beta1 conversion cycle would need to
+// round-trip identically: v1 doesn't have top-level subresources, and without
+// this restriction it would be ambiguous how a v1-with-identical-subresources
+// converts into a v1beta1.
+func MergeIdenticalVersionInfo(crd *apiextv1beta1.CustomResourceDefinition) {
+	if len(crd.Spec.Versions) > 0 {
+		mergeIdenticalSubresources(crd)
+		mergeIdenticalSchemata(crd)
+		mergeIdenticalPrinterColumns(crd)
+	}
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go
new file mode 100644
index 000000000..cba36c46c
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/desc_visitor.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crd
+
+import (
+	"strings"
+	"unicode"
+
+	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+)
+
+// TruncateDescription truncates the description of fields in the given schema
+// if it exceeds maxLen.
+// It tries to chop off the description at the closest sentence boundary.
+func TruncateDescription(schema *apiext.JSONSchemaProps, maxLen int) {
+	EditSchema(schema, descVisitor{maxLen: maxLen})
+}
+
+// descVisitor recursively visits all fields in the schema and truncates the
+// description of the fields to the specified maxLen.
+type descVisitor struct {
+	// maxLen is the maximum allowed length for the description of a field
+	maxLen int
+}
+
+func (v descVisitor) Visit(schema *apiext.JSONSchemaProps) SchemaVisitor {
+	if schema == nil {
+		return v
+	}
+	if v.maxLen < 0 {
+		return nil /* no further work to be done for this schema */
+	}
+	if v.maxLen == 0 {
+		schema.Description = ""
+		return v
+	}
+	if len(schema.Description) > v.maxLen {
+		schema.Description = truncateString(schema.Description, v.maxLen)
+		return v
+	}
+	return v
+}
+
+// truncateString truncates the given desc string if it exceeds maxLen. It may
+// return a string shorter than maxLen even when the original desc exceeds
+// maxLen, because it tries to chop off the desc at the closest sentence
+// boundary to avoid incomplete sentences.
+func truncateString(desc string, maxLen int) string {
+	desc = desc[0:maxLen]
+
+	// Try to chop off at the closest sentence boundary.
+	if n := strings.LastIndexFunc(desc, isSentenceTerminal); n > 0 {
+		return desc[0 : n+1]
+	}
+	// TODO(droot): Improve the logic to chop off at the closest word boundary,
+	// or add an ellipsis (...) to indicate that the text was chopped in case
+	// no sentence boundary was found within maxLen.
+	return desc
+}
+
+// isSentenceTerminal reports whether the given rune is a sentence terminal.
+func isSentenceTerminal(r rune) bool {
+	return unicode.Is(unicode.STerm, r)
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/doc.go
new file mode 100644
index 000000000..fd48e44ae
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/doc.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package crd contains utilities for generating CustomResourceDefinitions and
+// their corresponding OpenAPI validation schemata.
+//
+// Markers
+//
+// Markers live under the markers subpackage. Two types of markers exist:
+// those that modify schema generation (for validation), and those that modify
+// the rest of the CRD. See the subpackage for more information and all
+// supported markers.
+//
+// Collecting Types and Generating CRDs
+//
+// The Parser is the entrypoint for collecting the information required to
+// generate CRDs. Like loader and collector, its methods are idempotent, not
+// doing extra work if called multiple times.
+//
+// Parser's methods start with Need. Calling NeedXYZ indicates that XYZ should
+// be made present in the equivalent field of the Parser, where it can then be
+// loaded from. Each Need method will in turn call Need on anything it needs.
+//
+// In general, root packages should first be loaded into the Parser with
+// NeedPackage. Then, CRDs can be generated with NeedCRDFor.
+//
+// Errors are generally attached directly to the relevant Package with
+// AddError.
+//
+// Known Packages
+//
+// There are a few types from Kubernetes that have special meaning, but don't
+// have validation markers attached. Those specific types have overrides
+// listed in KnownPackages that can be added as overrides to any parser.
+//
+// Flattening
+//
+// Once schemata are generated, they can be used directly by external tooling
+// (like JSONSchema validators), but must first be "flattened" to not contain
+// references before use in a CRD (Kubernetes doesn't allow references in the
+// CRD's validation schema).
+//
+// The Flattener built into the Parser takes care of flattening out references
+// when requesting the CRDs, but can be invoked manually. It will not modify
+// the input schemata.
+//
+// Flattened schemata may further be passed to FlattenEmbedded to remove the
+// use of AllOf (which is used to describe embedded struct fields when
+// references are in use). This is done automatically when fetching CRDs.
+package crd
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go
new file mode 100644
index 000000000..00f4dc4c7
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/flatten.go
@@ -0,0 +1,441 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crd
+
+import (
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+
+	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// ErrorRecorder knows how to record errors. It wraps the part of
+// pkg/loader.Package that we need to record errors in places where it might
+// not make sense to have a loader.Package.
+type ErrorRecorder interface {
+	// AddError records that the given error occurred.
+	// See the documentation on loader.Package.AddError for more information.
+	AddError(error)
+}
+
+// isOrNil checks if val is nil if val is of a nillable type; otherwise,
+// it compares valInt to zeroInt (which should be the zero value's interface).
+func isOrNil(val reflect.Value, valInt interface{}, zeroInt interface{}) bool { + switch valKind := val.Kind(); valKind { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return val.IsNil() + default: + return valInt == zeroInt + } +} + +// flattenAllOfInto copies properties from src to dst, then copies the properties +// of each item in src's allOf to dst's properties as well. +func flattenAllOfInto(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps, errRec ErrorRecorder) { + if len(src.AllOf) > 0 { + for _, embedded := range src.AllOf { + flattenAllOfInto(dst, embedded, errRec) + } + } + + dstVal := reflect.Indirect(reflect.ValueOf(dst)) + srcVal := reflect.ValueOf(src) + typ := dstVal.Type() + + srcRemainder := apiext.JSONSchemaProps{} + srcRemVal := reflect.Indirect(reflect.ValueOf(&srcRemainder)) + dstRemainder := apiext.JSONSchemaProps{} + dstRemVal := reflect.Indirect(reflect.ValueOf(&dstRemainder)) + hoisted := false + + for i := 0; i < srcVal.NumField(); i++ { + fieldName := typ.Field(i).Name + switch fieldName { + case "AllOf": + // don't merge because we deal with it above + continue + case "Title", "Description", "Example", "ExternalDocs": + // don't merge because we pre-merge to properly preserve field docs + continue + } + srcField := srcVal.Field(i) + fldTyp := srcField.Type() + zeroVal := reflect.Zero(fldTyp) + zeroInt := zeroVal.Interface() + srcInt := srcField.Interface() + + if isOrNil(srcField, srcInt, zeroInt) { + // nothing to copy from src, continue + continue + } + + dstField := dstVal.Field(i) + dstInt := dstField.Interface() + if isOrNil(dstField, dstInt, zeroInt) { + // dst is empty, continue + dstField.Set(srcField) + continue + } + + if fldTyp.Comparable() && srcInt == dstInt { + // same value, continue + continue + } + + // resolve conflict + switch fieldName { + case "Properties": + // merge if possible, use all of otherwise + srcMap := srcInt.(map[string]apiext.JSONSchemaProps) + dstMap := dstInt.(map[string]apiext.JSONSchemaProps) + + for k, v := range srcMap { + dstProp, exists := dstMap[k] + if !exists { + dstMap[k] = v + continue + } + flattenAllOfInto(&dstProp, v, errRec) + dstMap[k] = dstProp + } + case "Required": + // merge + dstField.Set(reflect.AppendSlice(dstField, srcField)) + case "Type": + if srcInt != dstInt { + // TODO(directxman12): figure out how to attach this back to a useful point in the Go source or in the schema + errRec.AddError(fmt.Errorf("conflicting types in allOf branches in schema: %s vs %s", dstInt, srcInt)) + } + // keep the destination value, for now + // TODO(directxman12): Default -- use field? 
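		// NB(editor): a worked example of the merging above, not from the
		// upstream source. Flattening
		//
		//	{allOf: [{properties: {a: {type: string}}, required: [a]},
		//	         {properties: {b: {type: integer}}, required: [b]}]}
		//
		// into an empty destination yields
		//
		//	{properties: {a: {type: string}, b: {type: integer}}, required: [a, b]}
		//
		// Conflicting values that can't be merged field-by-field fall through
		// to the default case below and are hoisted back into a two-element
		// allOf instead.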
+		// TODO(directxman12):
+		// - Dependencies: if field x is present, then either schema validates or all props are present
+		// - AdditionalItems: like AdditionalProperties
+		// - Definitions: common named validation sets that can be referenced (merge, bail if duplicate)
+		case "AdditionalProperties":
+			// as of the time of writing, `allows: false` is not allowed, so we don't have to handle it
+			srcProps := srcInt.(*apiext.JSONSchemaPropsOrBool)
+			if srcProps.Schema == nil {
+				// nothing to merge
+				continue
+			}
+			dstProps := dstInt.(*apiext.JSONSchemaPropsOrBool)
+			if dstProps.Schema == nil {
+				dstProps.Schema = &apiext.JSONSchemaProps{}
+			}
+			flattenAllOfInto(dstProps.Schema, *srcProps.Schema, errRec)
+		// NB(directxman12): no need to explicitly handle nullable -- false is considered to be the zero value
+		// TODO(directxman12): src isn't necessarily the field value -- it's just the most recent allOf entry
+		default:
+			// hoist into allOf...
+			hoisted = true
+
+			srcRemVal.Field(i).Set(srcField)
+			dstRemVal.Field(i).Set(dstField)
+			// ...and clear the original
+			dstField.Set(zeroVal)
+		}
+	}
+
+	if hoisted {
+		dst.AllOf = append(dst.AllOf, dstRemainder, srcRemainder)
+	}
+
+	// dedup required
+	if len(dst.Required) > 0 {
+		reqUniq := make(map[string]struct{})
+		for _, req := range dst.Required {
+			reqUniq[req] = struct{}{}
+		}
+		dst.Required = make([]string, 0, len(reqUniq))
+		for req := range reqUniq {
+			dst.Required = append(dst.Required, req)
+		}
+		// be deterministic
+		sort.Strings(dst.Required)
+	}
+}
+
+// allOfVisitor recursively visits allOf fields in the schema,
+// merging nested allOf properties into the root schema.
+type allOfVisitor struct {
+	// errRec is used to record errors while flattening (like two conflicting
+	// field values used in an allOf)
+	errRec ErrorRecorder
+}
+
+func (v *allOfVisitor) Visit(schema *apiext.JSONSchemaProps) SchemaVisitor {
+	if schema == nil {
+		return v
+	}
+
+	// clear this now so that we can safely preserve edits made by flattenAllOfInto
+	origAllOf := schema.AllOf
+	schema.AllOf = nil
+
+	for _, embedded := range origAllOf {
+		flattenAllOfInto(schema, embedded, v.errRec)
+	}
+	return v
+}
+
+// NB(directxman12): FlattenEmbedded is separate from Flattener because
+// some tooling wants to flatten out embedded fields, but only actually
+// flatten a few specific types first.
+
+// FlattenEmbedded flattens embedded fields (represented via AllOf) which have
+// already had their references resolved into simple properties in the containing
+// schema.
+func FlattenEmbedded(schema *apiext.JSONSchemaProps, errRec ErrorRecorder) *apiext.JSONSchemaProps {
+	outSchema := schema.DeepCopy()
+	EditSchema(outSchema, &allOfVisitor{errRec: errRec})
+	return outSchema
+}
+
+// Flattener knows how to take a root type, and flatten all references in it
+// into a single, flat type. Flattened types are cached, so it's relatively
+// cheap to make repeated calls with the same type.
+type Flattener struct {
+	// Parser is used to lookup package and type details, and parse in new packages.
+	Parser *Parser
+
+	LookupReference func(ref string, contextPkg *loader.Package) (TypeIdent, error)
+
+	// flattenedTypes holds the flattened version of each seen type for later reuse.
+ flattenedTypes map[TypeIdent]apiext.JSONSchemaProps + initOnce sync.Once +} + +func (f *Flattener) init() { + f.initOnce.Do(func() { + f.flattenedTypes = make(map[TypeIdent]apiext.JSONSchemaProps) + if f.LookupReference == nil { + f.LookupReference = identFromRef + } + }) +} + +// cacheType saves the flattened version of the given type for later reuse +func (f *Flattener) cacheType(typ TypeIdent, schema apiext.JSONSchemaProps) { + f.init() + f.flattenedTypes[typ] = schema +} + +// loadUnflattenedSchema fetches a fresh, unflattened schema from the parser. +func (f *Flattener) loadUnflattenedSchema(typ TypeIdent) (*apiext.JSONSchemaProps, error) { + f.Parser.NeedSchemaFor(typ) + + baseSchema, found := f.Parser.Schemata[typ] + if !found { + return nil, fmt.Errorf("unable to locate schema for type %s", typ) + } + return &baseSchema, nil +} + +// FlattenType flattens the given pre-loaded type, removing any references from it. +// It deep-copies the schema first, so it won't affect the parser's version of the schema. +func (f *Flattener) FlattenType(typ TypeIdent) *apiext.JSONSchemaProps { + f.init() + if cachedSchema, isCached := f.flattenedTypes[typ]; isCached { + return &cachedSchema + } + baseSchema, err := f.loadUnflattenedSchema(typ) + if err != nil { + typ.Package.AddError(err) + return nil + } + resSchema := f.FlattenSchema(*baseSchema, typ.Package) + f.cacheType(typ, *resSchema) + return resSchema +} + +// FlattenSchema flattens the given schema, removing any references. +// It deep-copies the schema first, so the input schema won't be affected. +func (f *Flattener) FlattenSchema(baseSchema apiext.JSONSchemaProps, currentPackage *loader.Package) *apiext.JSONSchemaProps { + resSchema := baseSchema.DeepCopy() + EditSchema(resSchema, &flattenVisitor{ + Flattener: f, + currentPackage: currentPackage, + }) + + return resSchema +} + +// RefParts splits a reference produced by the schema generator into its component +// type name and package name (if it's a cross-package reference). Note that +// referenced packages *must* be looked up relative to the current package. +func RefParts(ref string) (typ string, pkgName string, err error) { + if !strings.HasPrefix(ref, defPrefix) { + return "", "", fmt.Errorf("non-standard reference link %q", ref) + } + ref = ref[len(defPrefix):] + // decode the json pointer encodings + ref = strings.Replace(ref, "~1", "/", -1) + ref = strings.Replace(ref, "~0", "~", -1) + nameParts := strings.SplitN(ref, "~", 2) + + if len(nameParts) == 1 { + // local reference + return nameParts[0], "", nil + } + // cross-package reference + return nameParts[1], nameParts[0], nil +} + +// identFromRef converts the given schema ref from the given package back +// into the TypeIdent that it represents. +func identFromRef(ref string, contextPkg *loader.Package) (TypeIdent, error) { + typ, pkgName, err := RefParts(ref) + if err != nil { + return TypeIdent{}, err + } + + if pkgName == "" { + // a local reference + return TypeIdent{ + Name: typ, + Package: contextPkg, + }, nil + } + + // an external reference + return TypeIdent{ + Name: typ, + Package: contextPkg.Imports()[pkgName], + }, nil +} + +// preserveFields copies documentation fields from src into dst, preserving +// field-level documentation when flattening, and preserving field-level validation +// as allOf entries. 
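// NB(editor): an illustrative decoding of the ref format handled by RefParts
// above; not part of the upstream source. A same-package reference like
//
//	#/definitions/FooSpec
//
// yields (typ="FooSpec", pkgName=""), while a cross-package reference like
//
//	#/definitions/k8s.io~1api~1core~1v1~0Container
//
// first has its JSON-pointer escapes decoded (~1 -> "/", ~0 -> "~") and then
// splits on the first "~" into (typ="Container", pkgName="k8s.io/api/core/v1").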
+func preserveFields(dst *apiext.JSONSchemaProps, src apiext.JSONSchemaProps) { + srcDesc := src.Description + srcTitle := src.Title + srcExDoc := src.ExternalDocs + srcEx := src.Example + + src.Description, src.Title, src.ExternalDocs, src.Example = "", "", nil, nil + + src.Ref = nil + *dst = apiext.JSONSchemaProps{ + AllOf: []apiext.JSONSchemaProps{*dst, src}, + + // keep these, in case the source field doesn't specify anything useful + Description: dst.Description, + Title: dst.Title, + ExternalDocs: dst.ExternalDocs, + Example: dst.Example, + } + + if srcDesc != "" { + dst.Description = srcDesc + } + if srcTitle != "" { + dst.Title = srcTitle + } + if srcExDoc != nil { + dst.ExternalDocs = srcExDoc + } + if srcEx != nil { + dst.Example = srcEx + } +} + +// flattenVisitor visits each node in the schema, recursively flattening references. +type flattenVisitor struct { + *Flattener + + currentPackage *loader.Package + currentType *TypeIdent + currentSchema *apiext.JSONSchemaProps + originalField apiext.JSONSchemaProps +} + +func (f *flattenVisitor) Visit(baseSchema *apiext.JSONSchemaProps) SchemaVisitor { + if baseSchema == nil { + // end-of-node marker, cache the results + if f.currentType != nil { + f.cacheType(*f.currentType, *f.currentSchema) + // preserve field information *after* caching so that we don't + // accidentally cache field-level information onto the schema for + // the type in general. + preserveFields(f.currentSchema, f.originalField) + } + return f + } + + // if we get a type that's just a ref, resolve it + if baseSchema.Ref != nil && len(*baseSchema.Ref) > 0 { + // resolve this ref + refIdent, err := f.LookupReference(*baseSchema.Ref, f.currentPackage) + if err != nil { + f.currentPackage.AddError(err) + return nil + } + + // load and potentially flatten the schema + + // check the cache first... + if refSchemaCached, isCached := f.flattenedTypes[refIdent]; isCached { + // shallow copy is fine, it's just to avoid overwriting the doc fields + preserveFields(&refSchemaCached, *baseSchema) + *baseSchema = refSchemaCached + return nil // don't recurse, we're done + } + + // ...otherwise, we need to flatten + refSchema, err := f.loadUnflattenedSchema(refIdent) + if err != nil { + f.currentPackage.AddError(err) + return nil + } + refSchema = refSchema.DeepCopy() + + // keep field around to preserve field-level validation, docs, etc + origField := *baseSchema + *baseSchema = *refSchema + + // avoid loops (which shouldn't exist, but just in case) + // by marking a nil cached pointer before we start recursing + f.cacheType(refIdent, apiext.JSONSchemaProps{}) + + return &flattenVisitor{ + Flattener: f.Flattener, + + currentPackage: refIdent.Package, + currentType: &refIdent, + currentSchema: baseSchema, + originalField: origField, + } + } + + // otherwise, continue recursing... + if f.currentType != nil { + // ...but don't accidentally end this node early (for caching purposes) + return &flattenVisitor{ + Flattener: f.Flattener, + currentPackage: f.currentPackage, + } + } + + return f +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go new file mode 100644 index 000000000..b82ed49e7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/gen.go @@ -0,0 +1,264 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + "fmt" + "go/types" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextlegacy "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime/schema" + + crdmarkers "sigs.k8s.io/controller-tools/pkg/crd/markers" + "sigs.k8s.io/controller-tools/pkg/genall" + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" + "sigs.k8s.io/controller-tools/pkg/version" +) + +// +controllertools:marker:generateHelp + +// Generator generates CustomResourceDefinition objects. +type Generator struct { + // TrivialVersions indicates that we should produce a single-version CRD. + // + // Single "trivial-version" CRDs are compatible with older (pre 1.13) + // Kubernetes API servers. The storage version's schema will be used as + // the CRD's schema. + // + // Only works with the v1beta1 CRD version. + TrivialVersions bool `marker:",optional"` + + // PreserveUnknownFields indicates whether or not we should turn off pruning. + // + // Left unspecified, it'll default to true when only a v1beta1 CRD is + // generated (to preserve compatibility with older versions of this tool), + // or false otherwise. + // + // It's required to be false for v1 CRDs. + PreserveUnknownFields *bool `marker:",optional"` + + // MaxDescLen specifies the maximum description length for fields in CRD's OpenAPI schema. + // + // 0 indicates drop the description for all fields completely. + // n indicates limit the description to at most n characters and truncate the description to + // closest sentence boundary if it exceeds n characters. + MaxDescLen *int `marker:",optional"` + + // CRDVersions specifies the target API versions of the CRD type itself to + // generate. Defaults to v1beta1. + // + // The first version listed will be assumed to be the "default" version and + // will not get a version suffix in the output filename. + // + // You'll need to use "v1" to get support for features like defaulting, + // along with an API server that supports it (Kubernetes 1.16+). 
+ CRDVersions []string `marker:"crdVersions,optional"` +} + +func (Generator) RegisterMarkers(into *markers.Registry) error { + return crdmarkers.Register(into) +} +func (g Generator) Generate(ctx *genall.GenerationContext) error { + parser := &Parser{ + Collector: ctx.Collector, + Checker: ctx.Checker, + } + + AddKnownTypes(parser) + for _, root := range ctx.Roots { + parser.NeedPackage(root) + } + + metav1Pkg := FindMetav1(ctx.Roots) + if metav1Pkg == nil { + // no objects in the roots, since nothing imported metav1 + return nil + } + + // TODO: allow selecting a specific object + kubeKinds := FindKubeKinds(parser, metav1Pkg) + if len(kubeKinds) == 0 { + // no objects in the roots + return nil + } + + crdVersions := g.CRDVersions + + if len(crdVersions) == 0 { + crdVersions = []string{"v1beta1"} + } + + for groupKind := range kubeKinds { + parser.NeedCRDFor(groupKind, g.MaxDescLen) + crdRaw := parser.CustomResourceDefinitions[groupKind] + addAttribution(&crdRaw) + + versionedCRDs := make([]interface{}, len(crdVersions)) + for i, ver := range crdVersions { + conv, err := AsVersion(crdRaw, schema.GroupVersion{Group: apiext.SchemeGroupVersion.Group, Version: ver}) + if err != nil { + return err + } + versionedCRDs[i] = conv + } + + if g.TrivialVersions { + for i, crd := range versionedCRDs { + if crdVersions[i] == "v1beta1" { + toTrivialVersions(crd.(*apiextlegacy.CustomResourceDefinition)) + } + } + } + + // *If* we're only generating v1beta1 CRDs, default to `preserveUnknownFields: (unset)` + // for compatibility purposes. In any other case, default to false, since that's + // the sensible default and is required for v1. + v1beta1Only := len(crdVersions) == 1 && crdVersions[0] == "v1beta1" + switch { + case (g.PreserveUnknownFields == nil || *g.PreserveUnknownFields) && v1beta1Only: + crd := versionedCRDs[0].(*apiextlegacy.CustomResourceDefinition) + crd.Spec.PreserveUnknownFields = nil + case g.PreserveUnknownFields == nil, g.PreserveUnknownFields != nil && !*g.PreserveUnknownFields: + // it'll be false here (coming from v1) -- leave it as such + default: + return fmt.Errorf("you may only set PreserveUnknownFields to true with v1beta1 CRDs") + } + + for i, crd := range versionedCRDs { + var fileName string + if i == 0 { + fileName = fmt.Sprintf("%s_%s.yaml", crdRaw.Spec.Group, crdRaw.Spec.Names.Plural) + } else { + fileName = fmt.Sprintf("%s_%s.%s.yaml", crdRaw.Spec.Group, crdRaw.Spec.Names.Plural, crdVersions[i]) + } + if err := ctx.WriteYAML(fileName, crd); err != nil { + return err + } + } + } + + return nil +} + +// toTrivialVersions strips out all schemata except for the storage schema, +// and moves that up into the root object. This makes the CRD compatible +// with pre 1.13 clusters. 
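// NB(editor): a sketch of the resulting output layout, not from the upstream
// source. For a group "operators.coreos.com" with plural "operators" and
// crdVersions=v1,v1beta1, Generate above writes
//
//	operators.coreos.com_operators.yaml          (first/default version, here v1)
//	operators.coreos.com_operators.v1beta1.yaml  (suffixed additional version)
//
// matching the fileName logic at the end of Generate.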
+func toTrivialVersions(crd *apiextlegacy.CustomResourceDefinition) { + var canonicalSchema *apiextlegacy.CustomResourceValidation + var canonicalSubresources *apiextlegacy.CustomResourceSubresources + var canonicalColumns []apiextlegacy.CustomResourceColumnDefinition + for i, ver := range crd.Spec.Versions { + if ver.Storage == true { + canonicalSchema = ver.Schema + canonicalSubresources = ver.Subresources + canonicalColumns = ver.AdditionalPrinterColumns + } + crd.Spec.Versions[i].Schema = nil + crd.Spec.Versions[i].Subresources = nil + crd.Spec.Versions[i].AdditionalPrinterColumns = nil + } + if canonicalSchema == nil { + return + } + + crd.Spec.Validation = canonicalSchema + crd.Spec.Subresources = canonicalSubresources + crd.Spec.AdditionalPrinterColumns = canonicalColumns +} + +// addAttribution adds attribution info to indicate controller-gen tool was used +// to generate this CRD definition along with the version info. +func addAttribution(crd *apiext.CustomResourceDefinition) { + if crd.ObjectMeta.Annotations == nil { + crd.ObjectMeta.Annotations = map[string]string{} + } + crd.ObjectMeta.Annotations["controller-gen.kubebuilder.io/version"] = version.Version() +} + +// FindMetav1 locates the actual package representing metav1 amongst +// the imports of the roots. +func FindMetav1(roots []*loader.Package) *loader.Package { + for _, root := range roots { + pkg := root.Imports()["k8s.io/apimachinery/pkg/apis/meta/v1"] + if pkg != nil { + return pkg + } + } + return nil +} + +// FindKubeKinds locates all types that contain TypeMeta and ObjectMeta +// (and thus may be a Kubernetes object), and returns the corresponding +// group-kinds. +func FindKubeKinds(parser *Parser, metav1Pkg *loader.Package) map[schema.GroupKind]struct{} { + // TODO(directxman12): technically, we should be finding metav1 per-package + kubeKinds := map[schema.GroupKind]struct{}{} + for typeIdent, info := range parser.Types { + hasObjectMeta := false + hasTypeMeta := false + + pkg := typeIdent.Package + pkg.NeedTypesInfo() + typesInfo := pkg.TypesInfo + + for _, field := range info.Fields { + if field.Name != "" { + // type and object meta are embedded, + // so they can't be this + continue + } + + fieldType := typesInfo.TypeOf(field.RawField.Type) + namedField, isNamed := fieldType.(*types.Named) + if !isNamed { + // ObjectMeta and TypeMeta are named types + continue + } + if namedField.Obj().Pkg() == nil { + // Embedded non-builtin universe type (specifically, it's probably `error`), + // so it can't be ObjectMeta or TypeMeta + continue + } + fieldPkgPath := loader.NonVendorPath(namedField.Obj().Pkg().Path()) + fieldPkg := pkg.Imports()[fieldPkgPath] + if fieldPkg != metav1Pkg { + continue + } + + switch namedField.Obj().Name() { + case "ObjectMeta": + hasObjectMeta = true + case "TypeMeta": + hasTypeMeta = true + } + } + + if !hasObjectMeta || !hasTypeMeta { + continue + } + + groupKind := schema.GroupKind{ + Group: parser.GroupVersions[pkg].Group, + Kind: typeIdent.Name, + } + kubeKinds[groupKind] = struct{}{} + } + + return kubeKinds +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go new file mode 100644 index 000000000..4649ca3ac --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/known_types.go @@ -0,0 +1,120 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package crd + +import ( + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// KnownPackages overrides types in some comment packages that have custom validation +// but don't have validation markers on them (since they're from core Kubernetes). +var KnownPackages = map[string]PackageOverride{ + + "k8s.io/apimachinery/pkg/apis/meta/v1": func(p *Parser, pkg *loader.Package) { + // ObjectMeta is managed by the Kubernetes API server, so no need to + // generate validation for it. + p.Schemata[TypeIdent{Name: "ObjectMeta", Package: pkg}] = apiext.JSONSchemaProps{ + Type: "object", + } + p.Schemata[TypeIdent{Name: "Time", Package: pkg}] = apiext.JSONSchemaProps{ + Type: "string", + Format: "date-time", + } + p.Schemata[TypeIdent{Name: "MicroTime", Package: pkg}] = apiext.JSONSchemaProps{ + Type: "string", + Format: "date-time", + } + p.Schemata[TypeIdent{Name: "Duration", Package: pkg}] = apiext.JSONSchemaProps{ + // TODO(directxman12): regexp validation for this (or get kube to support it as a format value) + Type: "string", + } + p.Schemata[TypeIdent{Name: "Fields", Package: pkg}] = apiext.JSONSchemaProps{ + // this is a recursive structure that can't be flattened or, for that matter, properly generated. + // so just treat it as an arbitrary map + Type: "object", + AdditionalProperties: &apiext.JSONSchemaPropsOrBool{Allows: true}, + } + p.AddPackage(pkg) // get the rest of the types + }, + + "k8s.io/apimachinery/pkg/api/resource": func(p *Parser, pkg *loader.Package) { + p.Schemata[TypeIdent{Name: "Quantity", Package: pkg}] = apiext.JSONSchemaProps{ + // TODO(directxman12): regexp validation for this (or get kube to support it as a format value) + XIntOrString: true, + AnyOf: []apiext.JSONSchemaProps{ + {Type: "integer"}, + {Type: "string"}, + }, + Pattern: "^(\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\\+|-)?(([0-9]+(\\.[0-9]*)?)|(\\.[0-9]+))))?$", + } + // No point in calling AddPackage, this is the sole inhabitant + }, + + "k8s.io/apimachinery/pkg/runtime": func(p *Parser, pkg *loader.Package) { + p.Schemata[TypeIdent{Name: "RawExtension", Package: pkg}] = apiext.JSONSchemaProps{ + // TODO(directxman12): regexp validation for this (or get kube to support it as a format value) + Type: "object", + } + p.AddPackage(pkg) // get the rest of the types + }, + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured": func(p *Parser, pkg *loader.Package) { + p.Schemata[TypeIdent{Name: "Unstructured", Package: pkg}] = apiext.JSONSchemaProps{ + Type: "object", + } + p.AddPackage(pkg) // get the rest of the types + }, + + "k8s.io/apimachinery/pkg/util/intstr": func(p *Parser, pkg *loader.Package) { + p.Schemata[TypeIdent{Name: "IntOrString", Package: pkg}] = apiext.JSONSchemaProps{ + XIntOrString: true, + AnyOf: []apiext.JSONSchemaProps{ + {Type: "integer"}, + {Type: "string"}, + }, + } + // No point in calling AddPackage, this is the sole inhabitant + }, + + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1": func(p *Parser, pkg *loader.Package) { + p.Schemata[TypeIdent{Name: "JSON", Package: 
pkg}] = apiext.JSONSchemaProps{ + XPreserveUnknownFields: boolPtr(true), + } + p.AddPackage(pkg) // get the rest of the types + }, + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1": func(p *Parser, pkg *loader.Package) { + p.Schemata[TypeIdent{Name: "JSON", Package: pkg}] = apiext.JSONSchemaProps{ + XPreserveUnknownFields: boolPtr(true), + } + p.AddPackage(pkg) // get the rest of the types + }, +} + +func boolPtr(b bool) *bool { + return &b +} + +// AddKnownTypes registers the packages overrides in KnownPackages with the given parser. +func AddKnownTypes(parser *Parser) { + // ensure everything is there before adding to PackageOverrides + // TODO(directxman12): this is a bit of a hack, maybe just use constructors? + parser.init() + for pkgName, override := range KnownPackages { + parser.PackageOverrides[pkgName] = override + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go new file mode 100644 index 000000000..9f1d19ac9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/crd.go @@ -0,0 +1,318 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "fmt" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// CRDMarkers lists all markers that directly modify the CRD (not validation +// schemas). +var CRDMarkers = []*definitionWithHelp{ + // TODO(directxman12): more detailed help + must(markers.MakeDefinition("kubebuilder:subresource:status", markers.DescribesType, SubresourceStatus{})). + WithHelp(SubresourceStatus{}.Help()), + + must(markers.MakeDefinition("kubebuilder:subresource:scale", markers.DescribesType, SubresourceScale{})). + WithHelp(SubresourceScale{}.Help()), + + must(markers.MakeDefinition("kubebuilder:printcolumn", markers.DescribesType, PrintColumn{})). + WithHelp(PrintColumn{}.Help()), + + must(markers.MakeDefinition("kubebuilder:resource", markers.DescribesType, Resource{})). + WithHelp(Resource{}.Help()), + + must(markers.MakeDefinition("kubebuilder:storageversion", markers.DescribesType, StorageVersion{})). + WithHelp(StorageVersion{}.Help()), + + must(markers.MakeDefinition("kubebuilder:skipversion", markers.DescribesType, SkipVersion{})). + WithHelp(SkipVersion{}.Help()), + + must(markers.MakeDefinition("kubebuilder:unservedversion", markers.DescribesType, UnservedVersion{})). + WithHelp(UnservedVersion{}.Help()), +} + +// TODO: categories and singular used to be annotations types +// TODO: doc + +func init() { + AllDefinitions = append(AllDefinitions, CRDMarkers...) +} + +// +controllertools:marker:generateHelp:category=CRD + +// SubresourceStatus enables the "/status" subresource on a CRD. 
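// NB(editor): an illustrative consumer-side override for KnownPackages (see
// known_types.go above); the package path and type are hypothetical, and the
// entry must be added before AddKnownTypes copies the map into the parser:
//
//	preserve := true
//	crd.KnownPackages["example.com/apis/wrapped"] = func(p *crd.Parser, pkg *loader.Package) {
//		p.Schemata[crd.TypeIdent{Name: "Blob", Package: pkg}] = apiext.JSONSchemaProps{
//			Type:                   "object",
//			XPreserveUnknownFields: &preserve,
//		}
//		p.AddPackage(pkg) // still collect the package's remaining types
//	}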
+type SubresourceStatus struct{} + +func (s SubresourceStatus) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + var subresources *apiext.CustomResourceSubresources + for i := range crd.Versions { + ver := &crd.Versions[i] + if ver.Name != version { + continue + } + if ver.Subresources == nil { + ver.Subresources = &apiext.CustomResourceSubresources{} + } + subresources = ver.Subresources + break + } + if subresources == nil { + return fmt.Errorf("status subresource applied to version %q not in CRD", version) + } + subresources.Status = &apiext.CustomResourceSubresourceStatus{} + return nil +} + +// +controllertools:marker:generateHelp:category=CRD + +// SubresourceScale enables the "/scale" subresource on a CRD. +type SubresourceScale struct { + // marker names are leftover legacy cruft + + // SpecPath specifies the jsonpath to the replicas field for the scale's spec. + SpecPath string `marker:"specpath"` + + // StatusPath specifies the jsonpath to the replicas field for the scale's status. + StatusPath string `marker:"statuspath"` + + // SelectorPath specifies the jsonpath to the pod label selector field for the scale's status. + // + // The selector field must be the *string* form (serialized form) of a selector. + // Setting a pod label selector is necessary for your type to work with the HorizontalPodAutoscaler. + SelectorPath *string `marker:"selectorpath"` +} + +func (s SubresourceScale) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + var subresources *apiext.CustomResourceSubresources + for i := range crd.Versions { + ver := &crd.Versions[i] + if ver.Name != version { + continue + } + if ver.Subresources == nil { + ver.Subresources = &apiext.CustomResourceSubresources{} + } + subresources = ver.Subresources + break + } + if subresources == nil { + return fmt.Errorf("scale subresource applied to version %q not in CRD", version) + } + subresources.Scale = &apiext.CustomResourceSubresourceScale{ + SpecReplicasPath: s.SpecPath, + StatusReplicasPath: s.StatusPath, + LabelSelectorPath: s.SelectorPath, + } + return nil +} + +// +controllertools:marker:generateHelp:category=CRD + +// StorageVersion marks this version as the "storage version" for the CRD for conversion. +// +// When conversion is enabled for a CRD (i.e. it's not a trivial-versions/single-version CRD), +// one version is set as the "storage version" to be stored in etcd. Attempting to store any +// other version will result in conversion to the storage version via a conversion webhook. +type StorageVersion struct{} + +func (s StorageVersion) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + if version == "" { + // single-version, do nothing + return nil + } + // multi-version + for i := range crd.Versions { + ver := &crd.Versions[i] + if ver.Name != version { + continue + } + ver.Storage = true + break + } + return nil +} + +// +controllertools:marker:generateHelp:category=CRD + +// SkipVersion removes the particular version of the CRD from the CRDs spec. +// +// This is useful if you need to skip generating and listing version entries +// for 'internal' resource versions, which typically exist if using the +// Kubernetes upstream conversion-gen tool. 
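// NB(editor): how these CRD markers look at a use site; the type below is an
// illustrative example, not from the upstream source:
//
//	// +kubebuilder:subresource:status
//	// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector
//	// +kubebuilder:storageversion
//	type Memcached struct { ... }
//
// Each marker's ApplyToCRD is invoked per served version after the CRD
// skeleton has been generated, as described in the package docs.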
+type SkipVersion struct{} + +func (s SkipVersion) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + if version == "" { + // single-version, this is an invalid state + return fmt.Errorf("cannot skip a version if there is only a single version") + } + var versions []apiext.CustomResourceDefinitionVersion + // multi-version + for i := range crd.Versions { + ver := crd.Versions[i] + if ver.Name == version { + // skip the skipped version + continue + } + versions = append(versions, ver) + } + crd.Versions = versions + return nil +} + +// +controllertools:marker:generateHelp:category=CRD + +// PrintColumn adds a column to "kubectl get" output for this CRD. +type PrintColumn struct { + // Name specifies the name of the column. + Name string + + // Type indicates the type of the column. + // + // It may be any OpenAPI data type listed at + // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types. + Type string + + // JSONPath specifies the jsonpath expression used to extract the value of the column. + JSONPath string `marker:"JSONPath"` // legacy cruft + + // Description specifies the help/description for this column. + Description string `marker:",optional"` + + // Format specifies the format of the column. + // + // It may be any OpenAPI data format corresponding to the type, listed at + // https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types. + Format string `marker:",optional"` + + // Priority indicates how important it is that this column be displayed. + // + // Lower priority (*higher* numbered) columns will be hidden if the terminal + // width is too small. + Priority int32 `marker:",optional"` +} + +func (s PrintColumn) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + var columns *[]apiext.CustomResourceColumnDefinition + for i := range crd.Versions { + ver := &crd.Versions[i] + if ver.Name != version { + continue + } + if ver.Subresources == nil { + ver.Subresources = &apiext.CustomResourceSubresources{} + } + columns = &ver.AdditionalPrinterColumns + break + } + if columns == nil { + return fmt.Errorf("printer columns applied to version %q not in CRD", version) + } + + *columns = append(*columns, apiext.CustomResourceColumnDefinition{ + Name: s.Name, + Type: s.Type, + JSONPath: s.JSONPath, + Description: s.Description, + Format: s.Format, + Priority: s.Priority, + }) + + return nil +} + +// +controllertools:marker:generateHelp:category=CRD + +// Resource configures naming and scope for a CRD. +type Resource struct { + // Path specifies the plural "resource" for this CRD. + // + // It generally corresponds to a plural, lower-cased version of the Kind. + // See https://book.kubebuilder.io/cronjob-tutorial/gvks.html. + Path string `marker:",optional"` + + // ShortName specifies aliases for this CRD. + // + // Short names are often used when people have work with your resource + // over and over again. For instance, "rs" for "replicaset" or + // "crd" for customresourcedefinition. + ShortName []string `marker:",optional"` + + // Categories specifies which group aliases this resource is part of. + // + // Group aliases are used to work with groups of resources at once. + // The most common one is "all" which covers about a third of the base + // resources in Kubernetes, and is generally used for "user-facing" resources. + Categories []string `marker:",optional"` + + // Singular overrides the singular form of your resource. 
+ // + // The singular form is otherwise defaulted off the plural (path). + Singular string `marker:",optional"` + + // Scope overrides the scope of the CRD (Cluster vs Namespaced). + // + // Scope defaults to "Namespaced". Cluster-scoped ("Cluster") resources + // don't exist in namespaces. + Scope string `marker:",optional"` +} + +func (s Resource) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + if s.Path != "" { + crd.Names.Plural = s.Path + } + if s.Singular != "" { + crd.Names.Singular = s.Singular + } + crd.Names.ShortNames = s.ShortName + crd.Names.Categories = s.Categories + + switch s.Scope { + case "": + crd.Scope = apiext.NamespaceScoped + default: + crd.Scope = apiext.ResourceScope(s.Scope) + } + + return nil +} + +// +controllertools:marker:generateHelp:category=CRD + +// UnservedVersion does not serve this version. +// +// This is useful if you need to drop support for a version in favor of a newer version. +type UnservedVersion struct{} + +func (s UnservedVersion) ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error { + for i := range crd.Versions { + ver := &crd.Versions[i] + if ver.Name != version { + continue + } + ver.Served = false + break + } + return nil +} + +// NB(directxman12): singular was historically distinct, so we keep it here for backwards compat diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go new file mode 100644 index 000000000..48736401c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/doc.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package markers defines markers for generating schema valiation +// and CRD structure. +// +// All markers related to CRD generation live in AllDefinitions. +// +// Validation Markers +// +// Validation markers have values that implement ApplyToSchema +// (crd.SchemaMarker). Any marker implementing this will automatically +// be run after the rest of a given schema node has been generated. +// Markers that need to be run before any other markers can also +// implement ApplyFirst, but this is discouraged and may change +// in the future. +// +// All validation markers start with "+kubebuilder:validation", and +// have the same name as their type name. +// +// CRD Markers +// +// Markers that modify anything in the CRD itself *except* for the schema +// implement ApplyToCRD (crd.CRDMarker). They are expected to detect whether +// they should apply themselves to a specific version in the CRD (as passed to +// them), or to the root-level CRD for legacy cases. They are applied *after* +// the rest of the CRD is computed. +// +// Misc +// +// This package also defines the "+groupName" and "+versionName" package-level +// markers, for defining package<->group-version mappings. 
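// NB(editor): an illustrative use of the resource and printcolumn markers
// described above; the type is hypothetical, not from the upstream source:
//
//	// +kubebuilder:resource:path=memcacheds,shortName=mc,scope=Namespaced,categories=all
//	// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
//	type Memcached struct { ... }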
+package markers diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/package.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/package.go new file mode 100644 index 000000000..cebe8fa4b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/package.go @@ -0,0 +1,40 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func init() { + AllDefinitions = append(AllDefinitions, + must(markers.MakeDefinition("groupName", markers.DescribesPackage, "")). + WithHelp(markers.SimpleHelp("CRD", "specifies the API group name for this package.")), + + must(markers.MakeDefinition("versionName", markers.DescribesPackage, "")). + WithHelp(markers.SimpleHelp("CRD", "overrides the API group version for this package (defaults to the package name).")), + + must(markers.MakeDefinition("kubebuilder:validation:Optional", markers.DescribesPackage, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that all fields in this package are optional by default.")), + + must(markers.MakeDefinition("kubebuilder:validation:Required", markers.DescribesPackage, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that all fields in this package are required by default.")), + + must(markers.MakeDefinition("kubebuilder:skip", markers.DescribesPackage, struct{}{})). + WithHelp(markers.SimpleHelp("CRD", "don't consider this package as an API version.")), + ) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go new file mode 100644 index 000000000..0e7c42694 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/register.go @@ -0,0 +1,83 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package markers
+
+import (
+	"reflect"
+
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+type definitionWithHelp struct {
+	*markers.Definition
+	Help *markers.DefinitionHelp
+}
+
+func (d *definitionWithHelp) WithHelp(help *markers.DefinitionHelp) *definitionWithHelp {
+	d.Help = help
+	return d
+}
+
+func (d *definitionWithHelp) Register(reg *markers.Registry) error {
+	if err := reg.Register(d.Definition); err != nil {
+		return err
+	}
+	if d.Help != nil {
+		reg.AddHelp(d.Definition, d.Help)
+	}
+	return nil
+}
+
+func must(def *markers.Definition, err error) *definitionWithHelp {
+	return &definitionWithHelp{
+		Definition: markers.Must(def, err),
+	}
+}
+
+// AllDefinitions contains all marker definitions for this package.
+var AllDefinitions []*definitionWithHelp
+
+type hasHelp interface {
+	Help() *markers.DefinitionHelp
+}
+
+// mustMakeAllWithPrefix converts each object into a marker definition, using
+// the object's type name with the prefix to form the marker name.
+func mustMakeAllWithPrefix(prefix string, target markers.TargetType, objs ...interface{}) []*definitionWithHelp {
+	defs := make([]*definitionWithHelp, len(objs))
+	for i, obj := range objs {
+		name := prefix + ":" + reflect.TypeOf(obj).Name()
+		def, err := markers.MakeDefinition(name, target, obj)
+		if err != nil {
+			panic(err)
+		}
+		defs[i] = &definitionWithHelp{Definition: def, Help: obj.(hasHelp).Help()}
+	}
+
+	return defs
+}
+
+// Register registers all definitions for CRD generation to the given registry.
+func Register(reg *markers.Registry) error {
+	for _, def := range AllDefinitions {
+		if err := def.Register(reg); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go
new file mode 100644
index 000000000..c5a3b80e9
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/topology.go
@@ -0,0 +1,155 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+	"fmt"
+
+	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// TopologyMarkers specify topology markers (i.e. markers that describe whether
+// a list behaves as an associative-list or a set, and whether a map is atomic
+// or not).
+var TopologyMarkers = []*definitionWithHelp{
+	must(markers.MakeDefinition("listMapKey", markers.DescribesField, ListMapKey(""))).
+		WithHelp(ListMapKey("").Help()),
+	must(markers.MakeDefinition("listType", markers.DescribesField, ListType(""))).
+		WithHelp(ListType("").Help()),
+	must(markers.MakeDefinition("mapType", markers.DescribesField, MapType(""))).
+		WithHelp(MapType("").Help()),
+	must(markers.MakeDefinition("structType", markers.DescribesField, StructType(""))).
+		WithHelp(StructType("").Help()),
+}
+
+func init() {
+	AllDefinitions = append(AllDefinitions, TopologyMarkers...)
+}
+
+// +controllertools:marker:generateHelp:category="CRD processing"
+
+// ListType specifies the type of data-structure that the list
+// represents (map, set, atomic).
+//
+// Possible data-structure types of a list are:
+//
+// - "map": it needs to have a key field, which will be used to build an
+// associative list. A typical example is the pod container list,
+// which is indexed by the container name.
+//
+// - "set": Fields need to be "scalar", and there can be only one
+// occurrence of each.
+//
+// - "atomic": All the fields in the list are treated as a single value,
+// and are typically manipulated together by the same actor.
+type ListType string
+
+// +controllertools:marker:generateHelp:category="CRD processing"
+
+// ListMapKey specifies the keys to map listTypes.
+//
+// It indicates the index of a map list. They can be repeated if multiple keys
+// must be used. It can only be used when ListType is set to map, and the keys
+// should be scalar types.
+type ListMapKey string
+
+// +controllertools:marker:generateHelp:category="CRD processing"
+
+// MapType specifies the level of atomicity of the map;
+// i.e. whether each item in the map is independent of the others,
+// or all fields are treated as a single unit.
+//
+// Possible values:
+//
+// - "granular": items in the map are independent of each other,
+// and can be manipulated by different actors.
+// This is the default behavior.
+//
+// - "atomic": all fields are treated as one unit.
+// Any changes have to replace the entire map.
+type MapType string
+
+// +controllertools:marker:generateHelp:category="CRD processing"
+
+// StructType specifies the level of atomicity of the struct;
+// i.e. whether each field in the struct is independent of the others,
+// or all fields are treated as a single unit.
+//
+// Possible values:
+//
+// - "granular": fields in the struct are independent of each other,
+// and can be manipulated by different actors.
+// This is the default behavior.
+//
+// - "atomic": all fields are treated as one unit.
+// Any changes have to replace the entire struct.
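+//
+// For example (an illustrative sketch; the field below is hypothetical,
+// not part of this changeset):
+//
+//	// +structType=atomic
+//	Selector metav1.LabelSelector `json:"selector,omitempty"`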
+type StructType string
+
+func (l ListType) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "array" {
+		return fmt.Errorf("must apply listType to an array")
+	}
+	if l != "map" && l != "atomic" && l != "set" {
+		return fmt.Errorf(`ListType must be either "map", "set" or "atomic"`)
+	}
+	p := string(l)
+	schema.XListType = &p
+	return nil
+}
+
+func (l ListType) ApplyFirst() {}
+
+func (l ListMapKey) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "array" {
+		return fmt.Errorf("must apply listMapKey to an array")
+	}
+	if schema.XListType == nil || *schema.XListType != "map" {
+		return fmt.Errorf("must apply listMapKey to an associative-list")
+	}
+	schema.XListMapKeys = append(schema.XListMapKeys, string(l))
+	return nil
+}
+
+func (m MapType) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "object" {
+		return fmt.Errorf("must apply mapType to an object")
+	}
+
+	if m != "atomic" && m != "granular" {
+		return fmt.Errorf(`MapType must be either "granular" or "atomic"`)
+	}
+
+	p := string(m)
+	schema.XMapType = &p
+
+	return nil
+}
+
+func (s StructType) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "object" && schema.Type != "" {
+		return fmt.Errorf("must apply structType to an object; either explicitly set or defaulted through an empty schema type")
+	}
+
+	if s != "atomic" && s != "granular" {
+		return fmt.Errorf(`StructType must be either "granular" or "atomic"`)
+	}
+
+	p := string(s)
+	schema.XMapType = &p
+
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go
new file mode 100644
index 000000000..d765e6d92
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/validation.go
@@ -0,0 +1,350 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+	"fmt"
+
+	"encoding/json"
+
+	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// ValidationMarkers lists all available markers that affect CRD schema generation,
+// except for the few that don't make sense as type-level markers (see FieldOnlyMarkers).
+// All markers start with `+kubebuilder:validation:`, and continue with their type name.
+// A copy of each marker is also produced that applies to types, making types
+// reusable and allowing complex validations to be written on slice items.
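+//
+// For example (an illustrative sketch; the type below is hypothetical), the
+// type-level copies allow a validated type to be declared once and reused:
+//
+//	// +kubebuilder:validation:Enum=Allow;Forbid;Replace
+//	type ConcurrencyPolicy string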
+var ValidationMarkers = mustMakeAllWithPrefix("kubebuilder:validation", markers.DescribesField, + + // integer markers + + Maximum(0), + Minimum(0), + ExclusiveMaximum(false), + ExclusiveMinimum(false), + MultipleOf(0), + + // string markers + + MaxLength(0), + MinLength(0), + Pattern(""), + + // slice markers + + MaxItems(0), + MinItems(0), + UniqueItems(false), + + // general markers + + Enum(nil), + Format(""), + Type(""), + XPreserveUnknownFields{}, + XEmbeddedResource{}, +) + +// FieldOnlyMarkers list field-specific validation markers (i.e. those markers that don't make +// sense on a type, and thus aren't in ValidationMarkers). +var FieldOnlyMarkers = []*definitionWithHelp{ + must(markers.MakeDefinition("kubebuilder:validation:Required", markers.DescribesField, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that this field is required, if fields are optional by default.")), + must(markers.MakeDefinition("kubebuilder:validation:Optional", markers.DescribesField, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that this field is optional, if fields are required by default.")), + must(markers.MakeDefinition("optional", markers.DescribesField, struct{}{})). + WithHelp(markers.SimpleHelp("CRD validation", "specifies that this field is optional, if fields are required by default.")), + + must(markers.MakeDefinition("nullable", markers.DescribesField, Nullable{})). + WithHelp(Nullable{}.Help()), + + must(markers.MakeAnyTypeDefinition("kubebuilder:default", markers.DescribesField, Default{})). + WithHelp(Default{}.Help()), + + must(markers.MakeDefinition("kubebuilder:pruning:PreserveUnknownFields", markers.DescribesField, XPreserveUnknownFields{})). + WithHelp(XPreserveUnknownFields{}.Help()), + must(markers.MakeDefinition("kubebuilder:validation:EmbeddedResource", markers.DescribesField, XEmbeddedResource{})). + WithHelp(XEmbeddedResource{}.Help()), +} + +func init() { + AllDefinitions = append(AllDefinitions, ValidationMarkers...) + + for _, def := range ValidationMarkers { + newDef := *def.Definition + // copy both parts so we don't change the definition + typDef := definitionWithHelp{ + Definition: &newDef, + Help: def.Help, + } + typDef.Target = markers.DescribesType + AllDefinitions = append(AllDefinitions, &typDef) + } + + AllDefinitions = append(AllDefinitions, FieldOnlyMarkers...) +} + +// +controllertools:marker:generateHelp:category="CRD validation" +// Maximum specifies the maximum numeric value that this field can have. +type Maximum int + +// +controllertools:marker:generateHelp:category="CRD validation" +// Minimum specifies the minimum numeric value that this field can have. +type Minimum int + +// +controllertools:marker:generateHelp:category="CRD validation" +// ExclusiveMinimum indicates that the minimum is "up to" but not including that value. +type ExclusiveMinimum bool + +// +controllertools:marker:generateHelp:category="CRD validation" +// ExclusiveMaximum indicates that the maximum is "up to" but not including that value. +type ExclusiveMaximum bool + +// +controllertools:marker:generateHelp:category="CRD validation" +// MultipleOf specifies that this field must have a numeric value that's a multiple of this one. +type MultipleOf int + +// +controllertools:marker:generateHelp:category="CRD validation" +// MaxLength specifies the maximum length for this string. +type MaxLength int + +// +controllertools:marker:generateHelp:category="CRD validation" +// MinLength specifies the minimum length for this string. 
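+//
+// For example (illustrative only; the field below is hypothetical):
+//
+//	// +kubebuilder:validation:MinLength=1
+//	DisplayName string `json:"displayName"`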
+type MinLength int
+
+// +controllertools:marker:generateHelp:category="CRD validation"
+// Pattern specifies that this string must match the given regular expression.
+type Pattern string
+
+// +controllertools:marker:generateHelp:category="CRD validation"
+// MaxItems specifies the maximum length for this list.
+type MaxItems int
+
+// +controllertools:marker:generateHelp:category="CRD validation"
+// MinItems specifies the minimum length for this list.
+type MinItems int
+
+// +controllertools:marker:generateHelp:category="CRD validation"
+// UniqueItems specifies that all items in this list must be unique.
+type UniqueItems bool
+
+// +controllertools:marker:generateHelp:category="CRD validation"
+// Enum specifies that this (scalar) field is restricted to the *exact* values specified here.
+type Enum []interface{}
+
+// +controllertools:marker:generateHelp:category="CRD validation"
+// Format specifies additional "complex" formatting for this field.
+//
+// For example, a date-time field would be marked as "type: string" and
+// "format: date-time".
+type Format string
+
+// +controllertools:marker:generateHelp:category="CRD validation"
+// Type overrides the type for this field (which defaults to the equivalent of the Go type).
+//
+// This generally must be paired with custom serialization. For example, the
+// metav1.Time field would be marked as "type: string" and "format: date-time".
+type Type string
+
+// +controllertools:marker:generateHelp:category="CRD validation"
+// Nullable marks this field as allowing the "null" value.
+//
+// This is often not necessary, but may be helpful with custom serialization.
+type Nullable struct{}
+
+// +controllertools:marker:generateHelp:category="CRD validation"
+// Default sets the default value for this field.
+//
+// A default value will be accepted as any value valid for the
+// field. Formatting for common types includes: boolean: `true`, string:
+// `Cluster`, numerical: `1.24`, array: `{1,2}`, object: `{policy:
+// "delete"}`. Defaults should be defined in pruned form, and only best-effort
+// validation will be performed. Full validation of a default requires
+// submission of the containing CRD to an apiserver.
+type Default struct {
+	Value interface{}
+}
+
+// +controllertools:marker:generateHelp:category="CRD processing"
+// PreserveUnknownFields stops the apiserver from pruning fields which are not specified.
+//
+// By default the apiserver drops unknown fields from the request payload
+// during the decoding step. This marker stops the API server from doing so.
+// It affects fields recursively, but switches back to normal pruning behaviour
+// if nested properties or additionalProperties are specified in the schema.
+// This can either be true or undefined. False is forbidden.
+type XPreserveUnknownFields struct{}
+
+// +controllertools:marker:generateHelp:category="CRD validation"
+// EmbeddedResource marks a field as an embedded resource with apiVersion, kind and metadata fields.
+//
+// An embedded resource is a value that has apiVersion, kind and metadata fields.
+// They are validated implicitly according to the semantics of the currently
+// running apiserver. It is not necessary to add any additional schema for these
+// fields, yet it is possible. This can be combined with PreserveUnknownFields.
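+//
+// For example (an illustrative sketch; the field below is hypothetical):
+//
+//	// +kubebuilder:validation:EmbeddedResource
+//	// +kubebuilder:pruning:PreserveUnknownFields
+//	Template runtime.RawExtension `json:"template,omitempty"`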
+type XEmbeddedResource struct{}
+
+func (m Maximum) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "integer" {
+		return fmt.Errorf("must apply maximum to an integer")
+	}
+	val := float64(m)
+	schema.Maximum = &val
+	return nil
+}
+func (m Minimum) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "integer" {
+		return fmt.Errorf("must apply minimum to an integer")
+	}
+	val := float64(m)
+	schema.Minimum = &val
+	return nil
+}
+func (m ExclusiveMaximum) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "integer" {
+		return fmt.Errorf("must apply exclusivemaximum to an integer")
+	}
+	schema.ExclusiveMaximum = bool(m)
+	return nil
+}
+func (m ExclusiveMinimum) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "integer" {
+		return fmt.Errorf("must apply exclusiveminimum to an integer")
+	}
+	schema.ExclusiveMinimum = bool(m)
+	return nil
+}
+func (m MultipleOf) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "integer" {
+		return fmt.Errorf("must apply multipleof to an integer")
+	}
+	val := float64(m)
+	schema.MultipleOf = &val
+	return nil
+}
+
+func (m MaxLength) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "string" {
+		return fmt.Errorf("must apply maxlength to a string")
+	}
+	val := int64(m)
+	schema.MaxLength = &val
+	return nil
+}
+func (m MinLength) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "string" {
+		return fmt.Errorf("must apply minlength to a string")
+	}
+	val := int64(m)
+	schema.MinLength = &val
+	return nil
+}
+func (m Pattern) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "string" {
+		return fmt.Errorf("must apply pattern to a string")
+	}
+	schema.Pattern = string(m)
+	return nil
+}
+
+func (m MaxItems) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "array" {
+		return fmt.Errorf("must apply maxitems to an array")
+	}
+	val := int64(m)
+	schema.MaxItems = &val
+	return nil
+}
+func (m MinItems) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "array" {
+		return fmt.Errorf("must apply minitems to an array")
+	}
+	val := int64(m)
+	schema.MinItems = &val
+	return nil
+}
+func (m UniqueItems) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	if schema.Type != "array" {
+		return fmt.Errorf("must apply uniqueitems to an array")
+	}
+	schema.UniqueItems = bool(m)
+	return nil
+}
+
+func (m Enum) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	// TODO(directxman12): this is a bit hacky -- we should
+	// probably support AnyType better + using the schema structure
+	vals := make([]apiext.JSON, len(m))
+	for i, val := range m {
+		// TODO(directxman12): check actual type with schema type?
+		// if we're expecting a string, marshal the string properly...
+		// NB(directxman12): we use json.Marshal to ensure we handle JSON escaping properly
+		valMarshalled, err := json.Marshal(val)
+		if err != nil {
+			return err
+		}
+		vals[i] = apiext.JSON{Raw: valMarshalled}
+	}
+	schema.Enum = vals
+	return nil
+}
+func (m Format) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	schema.Format = string(m)
+	return nil
+}
+
+// NB(directxman12): we "typecheck" on target schema properties here,
+// which means the "Type" marker *must* be applied first.
+// TODO(directxman12): find a less hacky way to do this
+// (we could preserve ordering of markers, but that feels bad in its own right).
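+//
+// As a hedged illustration of that ordering, Type paired with Format for a
+// custom-serialized field (mirroring the metav1.Time case described above;
+// the field name is hypothetical):
+//
+//	// +kubebuilder:validation:Type=string
+//	// +kubebuilder:validation:Format=date-time
+//	LastTransitionTime metav1.Time `json:"lastTransitionTime"`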
+
+func (m Type) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	schema.Type = string(m)
+	return nil
+}
+
+func (m Type) ApplyFirst() {}
+
+func (m Nullable) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	schema.Nullable = true
+	return nil
+}
+
+// Defaults are only valid in CRDs created with the v1 API
+func (m Default) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	marshalledDefault, err := json.Marshal(m.Value)
+	if err != nil {
+		return err
+	}
+	schema.Default = &apiext.JSON{Raw: marshalledDefault}
+	return nil
+}
+
+func (m XPreserveUnknownFields) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	defTrue := true
+	schema.XPreserveUnknownFields = &defTrue
+	return nil
+}
+
+func (m XEmbeddedResource) ApplyToSchema(schema *apiext.JSONSchemaProps) error {
+	schema.XEmbeddedResource = true
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go
new file mode 100644
index 000000000..152432bee
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/markers/zz_generated.markerhelp.go
@@ -0,0 +1,408 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by helpgen. DO NOT EDIT.
+
+package markers
+
+import (
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+func (Default) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "sets the default value for this field. ",
+			Details: "A default value will be accepted as any value valid for the field. Formatting for common types includes: boolean: `true`, string: `Cluster`, numerical: `1.24`, array: `{1,2}`, object: `{policy: \"delete\"}`. Defaults should be defined in pruned form, and only best-effort validation will be performed. 
Full validation of a default requires submission of the containing CRD to an apiserver.",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{
+			"Value": markers.DetailedHelp{
+				Summary: "",
+				Details: "",
+			},
+		},
+	}
+}
+
+func (Enum) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies that this (scalar) field is restricted to the *exact* values specified here.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (ExclusiveMaximum) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "indicates that the maximum is \"up to\" but not including that value.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (ExclusiveMinimum) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "indicates that the minimum is \"up to\" but not including that value.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (Format) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies additional \"complex\" formatting for this field. ",
+			Details: "For example, a date-time field would be marked as \"type: string\" and \"format: date-time\".",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (ListMapKey) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD processing",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies the keys to map listTypes. ",
+			Details: "It indicates the index of a map list. They can be repeated if multiple keys must be used. It can only be used when ListType is set to map, and the keys should be scalar types.",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (ListType) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD processing",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies the type of data-structure that the list represents (map, set, atomic). ",
+			Details: "Possible data-structure types of a list are: \n - \"map\": it needs to have a key field, which will be used to build an associative list. A typical example is the pod container list, which is indexed by the container name. \n - \"set\": Fields need to be \"scalar\", and there can be only one occurrence of each. \n - \"atomic\": All the fields in the list are treated as a single value, and are typically manipulated together by the same actor.",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (MapType) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD processing",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies the level of atomicity of the map; i.e. whether each item in the map is independent of the others, or all fields are treated as a single unit. ",
+			Details: "Possible values: \n - \"granular\": items in the map are independent of each other, and can be manipulated by different actors. This is the default behavior. \n - \"atomic\": all fields are treated as one unit. 
Any changes have to replace the entire map.",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (MaxItems) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies the maximum length for this list.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (MaxLength) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies the maximum length for this string.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (Maximum) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies the maximum numeric value that this field can have.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (MinItems) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies the minimum length for this list.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (MinLength) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies the minimum length for this string.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (Minimum) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies the minimum numeric value that this field can have.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (MultipleOf) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies that this field must have a numeric value that's a multiple of this one.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (Nullable) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "marks this field as allowing the \"null\" value. ",
+			Details: "This is often not necessary, but may be helpful with custom serialization.",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (Pattern) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies that this string must match the given regular expression.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (PrintColumn) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "adds a column to \"kubectl get\" output for this CRD.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{
+			"Name": markers.DetailedHelp{
+				Summary: "specifies the name of the column.",
+				Details: "",
+			},
+			"Type": markers.DetailedHelp{
+				Summary: "indicates the type of the column. 
", + Details: "It may be any OpenAPI data type listed at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types.", + }, + "JSONPath": markers.DetailedHelp{ + Summary: "specifies the jsonpath expression used to extract the value of the column.", + Details: "", + }, + "Description": markers.DetailedHelp{ + Summary: "specifies the help/description for this column.", + Details: "", + }, + "Format": markers.DetailedHelp{ + Summary: "specifies the format of the column. ", + Details: "It may be any OpenAPI data format corresponding to the type, listed at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types.", + }, + "Priority": markers.DetailedHelp{ + Summary: "indicates how important it is that this column be displayed. ", + Details: "Lower priority (*higher* numbered) columns will be hidden if the terminal width is too small.", + }, + }, + } +} + +func (Resource) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD", + DetailedHelp: markers.DetailedHelp{ + Summary: "configures naming and scope for a CRD.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "Path": markers.DetailedHelp{ + Summary: "specifies the plural \"resource\" for this CRD. ", + Details: "It generally corresponds to a plural, lower-cased version of the Kind. See https://book.kubebuilder.io/cronjob-tutorial/gvks.html.", + }, + "ShortName": markers.DetailedHelp{ + Summary: "specifies aliases for this CRD. ", + Details: "Short names are often used when people have work with your resource over and over again. For instance, \"rs\" for \"replicaset\" or \"crd\" for customresourcedefinition.", + }, + "Categories": markers.DetailedHelp{ + Summary: "specifies which group aliases this resource is part of. ", + Details: "Group aliases are used to work with groups of resources at once. The most common one is \"all\" which covers about a third of the base resources in Kubernetes, and is generally used for \"user-facing\" resources.", + }, + "Singular": markers.DetailedHelp{ + Summary: "overrides the singular form of your resource. ", + Details: "The singular form is otherwise defaulted off the plural (path).", + }, + "Scope": markers.DetailedHelp{ + Summary: "overrides the scope of the CRD (Cluster vs Namespaced). ", + Details: "Scope defaults to \"Namespaced\". Cluster-scoped (\"Cluster\") resources don't exist in namespaces.", + }, + }, + } +} + +func (SkipVersion) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD", + DetailedHelp: markers.DetailedHelp{ + Summary: "removes the particular version of the CRD from the CRDs spec. ", + Details: "This is useful if you need to skip generating and listing version entries for 'internal' resource versions, which typically exist if using the Kubernetes upstream conversion-gen tool.", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (StorageVersion) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "CRD", + DetailedHelp: markers.DetailedHelp{ + Summary: "marks this version as the \"storage version\" for the CRD for conversion. ", + Details: "When conversion is enabled for a CRD (i.e. it's not a trivial-versions/single-version CRD), one version is set as the \"storage version\" to be stored in etcd. 
Attempting to store any other version will result in conversion to the storage version via a conversion webhook.",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (StructType) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD processing",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies the level of atomicity of the struct; i.e. whether each field in the struct is independent of the others, or all fields are treated as a single unit. ",
+			Details: "Possible values: \n - \"granular\": fields in the struct are independent of each other, and can be manipulated by different actors. This is the default behavior. \n - \"atomic\": all fields are treated as one unit. Any changes have to replace the entire struct.",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (SubresourceScale) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "enables the \"/scale\" subresource on a CRD.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{
+			"SpecPath": markers.DetailedHelp{
+				Summary: "specifies the jsonpath to the replicas field for the scale's spec.",
+				Details: "",
+			},
+			"StatusPath": markers.DetailedHelp{
+				Summary: "specifies the jsonpath to the replicas field for the scale's status.",
+				Details: "",
+			},
+			"SelectorPath": markers.DetailedHelp{
+				Summary: "specifies the jsonpath to the pod label selector field for the scale's status. ",
+				Details: "The selector field must be the *string* form (serialized form) of a selector. Setting a pod label selector is necessary for your type to work with the HorizontalPodAutoscaler.",
+			},
+		},
+	}
+}
+
+func (SubresourceStatus) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "enables the \"/status\" subresource on a CRD.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (Type) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "overrides the type for this field (which defaults to the equivalent of the Go type). ",
+			Details: "This generally must be paired with custom serialization. For example, the metav1.Time field would be marked as \"type: string\" and \"format: date-time\".",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (UniqueItems) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies that all items in this list must be unique.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (UnservedVersion) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "does not serve this version. ",
+			Details: "This is useful if you need to drop support for a version in favor of a newer version.",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (XEmbeddedResource) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD validation",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "EmbeddedResource marks a field as an embedded resource with apiVersion, kind and metadata fields. ",
+			Details: "An embedded resource is a value that has apiVersion, kind and metadata fields. 
They are validated implicitly according to the semantics of the currently running apiserver. It is not necessary to add any additional schema for these fields, yet it is possible. This can be combined with PreserveUnknownFields.",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (XPreserveUnknownFields) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "CRD processing",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "PreserveUnknownFields stops the apiserver from pruning fields which are not specified. ",
+			Details: "By default the apiserver drops unknown fields from the request payload during the decoding step. This marker stops the API server from doing so. It affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go
new file mode 100644
index 000000000..34398a8c7
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/parser.go
@@ -0,0 +1,243 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crd
+
+import (
+	"fmt"
+	"go/ast"
+
+	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// TypeIdent represents some type in a Package.
+type TypeIdent struct {
+	Package *loader.Package
+	Name    string
+}
+
+func (t TypeIdent) String() string {
+	return fmt.Sprintf("%q.%s", t.Package.ID, t.Name)
+}
+
+// PackageOverride overrides the loading of some package
+// (potentially setting custom schemata, etc). It must
+// call AddPackage if it wants to continue with the default
+// loading behavior.
+type PackageOverride func(p *Parser, pkg *loader.Package)
+
+// Parser knows how to parse out CRD information and generate
+// OpenAPI schemata from some collection of types and markers.
+// Most methods on Parser cache their results automatically,
+// and thus may be called any number of times.
+type Parser struct {
+	Collector *markers.Collector
+
+	// Types contains the known TypeInfo for this parser.
+	Types map[TypeIdent]*markers.TypeInfo
+	// Schemata contains the known OpenAPI JSONSchemata for this parser.
+	Schemata map[TypeIdent]apiext.JSONSchemaProps
+	// GroupVersions contains the known group-versions of each package in this parser.
+	GroupVersions map[*loader.Package]schema.GroupVersion
+	// CustomResourceDefinitions contains the known CustomResourceDefinitions for types in this parser.
+	CustomResourceDefinitions map[schema.GroupKind]apiext.CustomResourceDefinition
+	// FlattenedSchemata contains fully flattened schemata for use in building
+	// CustomResourceDefinition validation. 
Each schema has been flattened by the flattener, + // and then embedded fields have been flattened with FlattenEmbedded. + FlattenedSchemata map[TypeIdent]apiext.JSONSchemaProps + + // PackageOverrides indicates that the loading of any package with + // the given path should be handled by the given overrider. + PackageOverrides map[string]PackageOverride + + // checker stores persistent partial type-checking/reference-traversal information. + Checker *loader.TypeChecker + // packages marks packages as loaded, to avoid re-loading them. + packages map[*loader.Package]struct{} + + flattener *Flattener +} + +func (p *Parser) init() { + if p.packages == nil { + p.packages = make(map[*loader.Package]struct{}) + } + if p.flattener == nil { + p.flattener = &Flattener{ + Parser: p, + } + } + if p.Schemata == nil { + p.Schemata = make(map[TypeIdent]apiext.JSONSchemaProps) + } + if p.Types == nil { + p.Types = make(map[TypeIdent]*markers.TypeInfo) + } + if p.PackageOverrides == nil { + p.PackageOverrides = make(map[string]PackageOverride) + } + if p.GroupVersions == nil { + p.GroupVersions = make(map[*loader.Package]schema.GroupVersion) + } + if p.CustomResourceDefinitions == nil { + p.CustomResourceDefinitions = make(map[schema.GroupKind]apiext.CustomResourceDefinition) + } + if p.FlattenedSchemata == nil { + p.FlattenedSchemata = make(map[TypeIdent]apiext.JSONSchemaProps) + } +} + +// indexTypes loads all types in the package into Types. +func (p *Parser) indexTypes(pkg *loader.Package) { + // autodetect + pkgMarkers, err := markers.PackageMarkers(p.Collector, pkg) + if err != nil { + pkg.AddError(err) + } else { + if skipPkg := pkgMarkers.Get("kubebuilder:skip"); skipPkg != nil { + return + } + if nameVal := pkgMarkers.Get("groupName"); nameVal != nil { + versionVal := pkg.Name // a reasonable guess + if versionMarker := pkgMarkers.Get("versionName"); versionMarker != nil { + versionVal = versionMarker.(string) + } + + p.GroupVersions[pkg] = schema.GroupVersion{ + Version: versionVal, + Group: nameVal.(string), + } + } + } + + if err := markers.EachType(p.Collector, pkg, func(info *markers.TypeInfo) { + ident := TypeIdent{ + Package: pkg, + Name: info.Name, + } + + p.Types[ident] = info + }); err != nil { + pkg.AddError(err) + } +} + +// LookupType fetches type info from Types. +func (p *Parser) LookupType(pkg *loader.Package, name string) *markers.TypeInfo { + return p.Types[TypeIdent{Package: pkg, Name: name}] +} + +// NeedSchemaFor indicates that a schema should be generated for the given type. 
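+//
+// A minimal (illustrative) call sequence, assuming pkg came from the loader
+// and "Memcached" is a kind defined in that package (both hypothetical):
+//
+//	p.NeedPackage(pkg)
+//	p.NeedSchemaFor(TypeIdent{Package: pkg, Name: "Memcached"})
+//	schema := p.Schemata[TypeIdent{Package: pkg, Name: "Memcached"}]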
+func (p *Parser) NeedSchemaFor(typ TypeIdent) {
+	p.init()
+
+	p.NeedPackage(typ.Package)
+	if _, knownSchema := p.Schemata[typ]; knownSchema {
+		return
+	}
+
+	info, knownInfo := p.Types[typ]
+	if !knownInfo {
+		typ.Package.AddError(fmt.Errorf("unknown type %s", typ))
+		return
+	}
+
+	// avoid tripping recursive schemata, like ManagedFields, by adding an empty WIP schema
+	p.Schemata[typ] = apiext.JSONSchemaProps{}
+
+	schemaCtx := newSchemaContext(typ.Package, p)
+	ctxForInfo := schemaCtx.ForInfo(info)
+
+	pkgMarkers, err := markers.PackageMarkers(p.Collector, typ.Package)
+	if err != nil {
+		typ.Package.AddError(err)
+	}
+	ctxForInfo.PackageMarkers = pkgMarkers
+
+	schema := infoToSchema(ctxForInfo)
+
+	p.Schemata[typ] = *schema
+}
+
+func (p *Parser) NeedFlattenedSchemaFor(typ TypeIdent) {
+	p.init()
+
+	if _, knownSchema := p.FlattenedSchemata[typ]; knownSchema {
+		return
+	}
+
+	p.NeedSchemaFor(typ)
+	partialFlattened := p.flattener.FlattenType(typ)
+	fullyFlattened := FlattenEmbedded(partialFlattened, typ.Package)
+
+	p.FlattenedSchemata[typ] = *fullyFlattened
+}
+
+// NeedCRDFor lives off in spec.go
+
+// AddPackage indicates that types and type-checking information is needed
+// for the given package, *ignoring* overrides.
+// Generally, consumers should call NeedPackage, while PackageOverrides should
+// call AddPackage to continue with the normal loading procedure.
+func (p *Parser) AddPackage(pkg *loader.Package) {
+	p.init()
+	if _, checked := p.packages[pkg]; checked {
+		return
+	}
+	p.indexTypes(pkg)
+	p.Checker.Check(pkg, filterTypesForCRDs)
+	p.packages[pkg] = struct{}{}
+}
+
+// NeedPackage indicates that types and type-checking information
+// is needed for the given package.
+func (p *Parser) NeedPackage(pkg *loader.Package) {
+	p.init()
+	if _, checked := p.packages[pkg]; checked {
+		return
+	}
+	// overrides are going to be written without vendor. This is why we index by the actual
+	// object when we can.
+	if override, overridden := p.PackageOverrides[loader.NonVendorPath(pkg.PkgPath)]; overridden {
+		override(p, pkg)
+		p.packages[pkg] = struct{}{}
+		return
+	}
+	p.AddPackage(pkg)
+}
+
+// filterTypesForCRDs filters out all nodes that aren't used in CRD generation,
+// like interfaces and struct fields without JSON tag.
+func filterTypesForCRDs(node ast.Node) bool {
+	switch node := node.(type) {
+	case *ast.InterfaceType:
+		// skip interfaces, we never care about references in them
+		return false
+	case *ast.StructType:
+		return true
+	case *ast.Field:
+		_, hasTag := loader.ParseAstTag(node.Tag).Lookup("json")
+		// fields without JSON tags mean we have custom serialization,
+		// so only visit fields with tags.
+		return hasTag
+	default:
+		return true
+	}
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go
new file mode 100644
index 000000000..89237e6a7
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema.go
@@ -0,0 +1,419 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crd
+
+import (
+	"fmt"
+	"go/ast"
+	"go/types"
+	"strings"
+
+	apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// Schema flattening is done in a recursive mapping method.
+// Start reading at infoToSchema.
+
+const (
+	// defPrefix is the prefix used to link to definitions in the OpenAPI schema.
+	defPrefix = "#/definitions/"
+)
+
+var (
+	// byteType is the types.Type for byte (see the types documentation
+	// for why we need to look this up in the Universe), saved
+	// for quick comparison.
+	byteType = types.Universe.Lookup("byte").Type()
+)
+
+// SchemaMarker is any marker that needs to modify the schema of the underlying type or field.
+type SchemaMarker interface {
+	// ApplyToSchema is called after the rest of the schema for a given type
+	// or field is generated, to modify the schema appropriately.
+	ApplyToSchema(*apiext.JSONSchemaProps) error
+}
+
+// applyFirstMarker is applied before any other markers. It's a bit of a hack.
+type applyFirstMarker interface {
+	ApplyFirst()
+}
+
+// schemaRequester knows how to mark that another schema (e.g. via an external reference) is necessary.
+type schemaRequester interface {
+	NeedSchemaFor(typ TypeIdent)
+}
+
+// schemaContext stores and provides information across a hierarchy of schema generation.
+type schemaContext struct {
+	pkg  *loader.Package
+	info *markers.TypeInfo
+
+	schemaRequester schemaRequester
+	PackageMarkers  markers.MarkerValues
+}
+
+// newSchemaContext constructs a new schemaContext for the given package and schema requester.
+// It must have type info added before use via ForInfo.
+func newSchemaContext(pkg *loader.Package, req schemaRequester) *schemaContext {
+	pkg.NeedTypesInfo()
+	return &schemaContext{
+		pkg:             pkg,
+		schemaRequester: req,
+	}
+}
+
+// ForInfo produces a new schemaContext containing the same information
+// as this one, but with the given type information.
+func (c *schemaContext) ForInfo(info *markers.TypeInfo) *schemaContext {
+	return &schemaContext{
+		pkg:             c.pkg,
+		info:            info,
+		schemaRequester: c.schemaRequester,
+	}
+}
+
+// requestSchema asks for the schema for a type in the package with the
+// given import path.
+func (c *schemaContext) requestSchema(pkgPath, typeName string) {
+	pkg := c.pkg
+	if pkgPath != "" {
+		pkg = c.pkg.Imports()[pkgPath]
+	}
+	c.schemaRequester.NeedSchemaFor(TypeIdent{
+		Package: pkg,
+		Name:    typeName,
+	})
+}
+
+// infoToSchema creates a schema for the type in the given set of type information.
+func infoToSchema(ctx *schemaContext) *apiext.JSONSchemaProps {
+	return typeToSchema(ctx, ctx.info.RawSpec.Type)
+}
+
+// applyMarkers applies schema markers to the given schema, respecting "apply first" markers.
+func applyMarkers(ctx *schemaContext, markerSet markers.MarkerValues, props *apiext.JSONSchemaProps, node ast.Node) {
+	// apply "apply first" markers first...
+	for _, markerValues := range markerSet {
+		for _, markerValue := range markerValues {
+			if _, isApplyFirst := markerValue.(applyFirstMarker); !isApplyFirst {
+				continue
+			}
+
+			schemaMarker, isSchemaMarker := markerValue.(SchemaMarker)
+			if !isSchemaMarker {
+				continue
+			}
+
+			if err := schemaMarker.ApplyToSchema(props); err != nil {
+				ctx.pkg.AddError(loader.ErrFromNode(err /* an okay guess */, node))
+			}
+		}
+	}
+
+	// ...then the rest of the markers
+	for _, markerValues := range markerSet {
+		for _, markerValue := range markerValues {
+			if _, isApplyFirst := markerValue.(applyFirstMarker); isApplyFirst {
+				// skip apply-first markers, which were already applied
+				continue
+			}
+
+			schemaMarker, isSchemaMarker := markerValue.(SchemaMarker)
+			if !isSchemaMarker {
+				continue
+			}
+			if err := schemaMarker.ApplyToSchema(props); err != nil {
+				ctx.pkg.AddError(loader.ErrFromNode(err /* an okay guess */, node))
+			}
+		}
+	}
+}
+
+// typeToSchema creates a schema for the given AST type.
+func typeToSchema(ctx *schemaContext, rawType ast.Expr) *apiext.JSONSchemaProps {
+	var props *apiext.JSONSchemaProps
+	switch expr := rawType.(type) {
+	case *ast.Ident:
+		props = localNamedToSchema(ctx, expr)
+	case *ast.SelectorExpr:
+		props = namedToSchema(ctx, expr)
+	case *ast.ArrayType:
+		props = arrayToSchema(ctx, expr)
+	case *ast.MapType:
+		props = mapToSchema(ctx, expr)
+	case *ast.StarExpr:
+		props = typeToSchema(ctx, expr.X)
+	case *ast.StructType:
+		props = structToSchema(ctx, expr)
+	default:
+		ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unsupported AST kind %T", expr), rawType))
+		// NB(directxman12): we explicitly don't handle interfaces
+		return &apiext.JSONSchemaProps{}
+	}
+
+	props.Description = ctx.info.Doc
+
+	applyMarkers(ctx, ctx.info.Markers, props, rawType)
+
+	return props
+}
+
+// qualifiedName constructs a JSONSchema-safe qualified name for a type
+// (`<typeName>` or `<safePkgPath>~0<typeName>`, where `<safePkgPath>`
+// is the package path with `/` replaced by `~1`, according to JSONPointer
+// escapes).
+func qualifiedName(pkgName, typeName string) string {
+	if pkgName != "" {
+		return strings.Replace(pkgName, "/", "~1", -1) + "~0" + typeName
+	}
+	return typeName
+}
+
+// TypeRefLink creates a definition link for the given type and package.
+func TypeRefLink(pkgName, typeName string) string {
+	return defPrefix + qualifiedName(pkgName, typeName)
+}
+
+// localNamedToSchema creates a schema (ref) for a *potentially* local type reference
+// (could be external from a dot-import).
+func localNamedToSchema(ctx *schemaContext, ident *ast.Ident) *apiext.JSONSchemaProps {
+	typeInfo := ctx.pkg.TypesInfo.TypeOf(ident)
+	if typeInfo == types.Typ[types.Invalid] {
+		ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %s", ident.Name), ident))
+		return &apiext.JSONSchemaProps{}
+	}
+	if basicInfo, isBasic := typeInfo.(*types.Basic); isBasic {
+		typ, fmt, err := builtinToType(basicInfo)
+		if err != nil {
+			ctx.pkg.AddError(loader.ErrFromNode(err, ident))
+		}
+		return &apiext.JSONSchemaProps{
+			Type:   typ,
+			Format: fmt,
+		}
+	}
+	// NB(directxman12): if there are dot imports, this might be an external reference,
+	// so use typechecking info to get the actual object
+	typeNameInfo := typeInfo.(*types.Named).Obj()
+	pkg := typeNameInfo.Pkg()
+	pkgPath := loader.NonVendorPath(pkg.Path())
+	if pkg == ctx.pkg.Types {
+		pkgPath = ""
+	}
+	ctx.requestSchema(pkgPath, typeNameInfo.Name())
+	link := TypeRefLink(pkgPath, typeNameInfo.Name())
+	return &apiext.JSONSchemaProps{
+		Ref: &link,
+	}
+}
+
+// namedToSchema creates a schema (ref) for an explicitly external type reference.
+func namedToSchema(ctx *schemaContext, named *ast.SelectorExpr) *apiext.JSONSchemaProps {
+	typeInfoRaw := ctx.pkg.TypesInfo.TypeOf(named)
+	if typeInfoRaw == types.Typ[types.Invalid] {
+		ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %v.%s", named.X, named.Sel.Name), named))
+		return &apiext.JSONSchemaProps{}
+	}
+	typeInfo := typeInfoRaw.(*types.Named)
+	typeNameInfo := typeInfo.Obj()
+	nonVendorPath := loader.NonVendorPath(typeNameInfo.Pkg().Path())
+	ctx.requestSchema(nonVendorPath, typeNameInfo.Name())
+	link := TypeRefLink(nonVendorPath, typeNameInfo.Name())
+	return &apiext.JSONSchemaProps{
+		Ref: &link,
+	}
+	// NB(directxman12): we special-case things like resource.Quantity during the "collapse" phase.
+}
+
+// arrayToSchema creates a schema for the items of the given array, dealing appropriately
+// with the special `[]byte` type (according to OpenAPI standards).
+func arrayToSchema(ctx *schemaContext, array *ast.ArrayType) *apiext.JSONSchemaProps {
+	eltType := ctx.pkg.TypesInfo.TypeOf(array.Elt)
+	if eltType == byteType && array.Len == nil {
+		// byte slices are represented as base64-encoded strings
+		// (the format is defined in OpenAPI v3, but not JSON Schema)
+		return &apiext.JSONSchemaProps{
+			Type:   "string",
+			Format: "byte",
+		}
+	}
+	// TODO(directxman12): backwards-compat would require access to markers from base info
+	items := typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), array.Elt)
+
+	return &apiext.JSONSchemaProps{
+		Type:  "array",
+		Items: &apiext.JSONSchemaPropsOrArray{Schema: items},
+	}
+}
+
+// mapToSchema creates a schema for items of the given map. Key types must eventually resolve
+// to string (other types aren't allowed by JSON, and thus the kubernetes API standards).
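+//
+// For example (an illustrative sketch; the field names are hypothetical),
+// both of these yield "type: object" with the value schema placed under
+// additionalProperties:
+//
+//	Labels   map[string]string `json:"labels,omitempty"`
+//	Replicas map[string]int32  `json:"replicas,omitempty"`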
+func mapToSchema(ctx *schemaContext, mapType *ast.MapType) *apiext.JSONSchemaProps { + keyInfo := ctx.pkg.TypesInfo.TypeOf(mapType.Key) + // check that we've got a type that actually corresponds to a string + for keyInfo != nil { + switch typedKey := keyInfo.(type) { + case *types.Basic: + if typedKey.Info()&types.IsString == 0 { + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map keys must be strings, not %s", keyInfo.String()), mapType.Key)) + return &apiext.JSONSchemaProps{} + } + keyInfo = nil // stop iterating + case *types.Named: + keyInfo = typedKey.Underlying() + default: + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map keys must be strings, not %s", keyInfo.String()), mapType.Key)) + return &apiext.JSONSchemaProps{} + } + } + + // TODO(directxman12): backwards-compat would require access to markers from base info + var valSchema *apiext.JSONSchemaProps + switch val := mapType.Value.(type) { + case *ast.Ident: + valSchema = localNamedToSchema(ctx.ForInfo(&markers.TypeInfo{}), val) + case *ast.SelectorExpr: + valSchema = namedToSchema(ctx.ForInfo(&markers.TypeInfo{}), val) + case *ast.ArrayType: + valSchema = arrayToSchema(ctx.ForInfo(&markers.TypeInfo{}), val) + if valSchema.Type == "array" && valSchema.Items.Schema.Type != "string" { + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map values must be a named type, not %T", mapType.Value), mapType.Value)) + return &apiext.JSONSchemaProps{} + } + default: + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("map values must be a named type, not %T", mapType.Value), mapType.Value)) + return &apiext.JSONSchemaProps{} + } + + return &apiext.JSONSchemaProps{ + Type: "object", + AdditionalProperties: &apiext.JSONSchemaPropsOrBool{ + Schema: valSchema, + Allows: true, /* set automatically by serialization, but useful for testing */ + }, + } +} + +// structToSchema creates a schema for the given struct. Embedded fields are placed in AllOf, +// and can be flattened later with a Flattener. +func structToSchema(ctx *schemaContext, structType *ast.StructType) *apiext.JSONSchemaProps { + props := &apiext.JSONSchemaProps{ + Type: "object", + Properties: make(map[string]apiext.JSONSchemaProps), + } + + if ctx.info.RawSpec.Type != structType { + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("encountered non-top-level struct (possibly embedded), those aren't allowed"), structType)) + return props + } + + for _, field := range ctx.info.Fields { + jsonTag, hasTag := field.Tag.Lookup("json") + if !hasTag { + // if the field doesn't have a JSON tag, it doesn't belong in output (and shouldn't exist in a serialized type) + ctx.pkg.AddError(loader.ErrFromNode(fmt.Errorf("encountered struct field %q without JSON tag in type %q", field.Name, ctx.info.Name), field.RawField)) + continue + } + jsonOpts := strings.Split(jsonTag, ",") + if len(jsonOpts) == 1 && jsonOpts[0] == "-" { + // skipped fields have the tag "-" (note that "-," means the field is named "-") + continue + } + + inline := false + omitEmpty := false + for _, opt := range jsonOpts[1:] { + switch opt { + case "inline": + inline = true + case "omitempty": + omitEmpty = true + } + } + fieldName := jsonOpts[0] + inline = inline || fieldName == "" // anonymous fields are inline fields in YAML/JSON + + // if no default required mode is set, default to required + defaultMode := "required" + if ctx.PackageMarkers.Get("kubebuilder:validation:Optional") != nil { + defaultMode = "optional" + } + + switch defaultMode { + // if this package isn't set to optional default... 
+ case "required": + // ...everything that's not inline, omitempty, or explicitly optional is required + if !inline && !omitEmpty && field.Markers.Get("kubebuilder:validation:Optional") == nil && field.Markers.Get("optional") == nil { + props.Required = append(props.Required, fieldName) + } + + // if this package isn't set to required default... + case "optional": + // ...everything that isn't explicitly required is optional + if field.Markers.Get("kubebuilder:validation:Required") != nil { + props.Required = append(props.Required, fieldName) + } + } + + propSchema := typeToSchema(ctx.ForInfo(&markers.TypeInfo{}), field.RawField.Type) + propSchema.Description = field.Doc + + applyMarkers(ctx, field.Markers, propSchema, field.RawField) + + if inline { + props.AllOf = append(props.AllOf, *propSchema) + continue + } + + props.Properties[fieldName] = *propSchema + } + + return props +} + +// builtinToType converts builtin basic types to their equivalent JSON schema form. +// It *only* handles types allowed by the kubernetes API standards. Floats are not +// allowed. +func builtinToType(basic *types.Basic) (typ string, format string, err error) { + // NB(directxman12): formats from OpenAPI v3 are slightly different than those defined + // in JSONSchema. This'll use the OpenAPI v3 ones, since they're useful for bounding our + // non-string types. + basicInfo := basic.Info() + switch { + case basicInfo&types.IsBoolean != 0: + typ = "boolean" + case basicInfo&types.IsString != 0: + typ = "string" + case basicInfo&types.IsInteger != 0: + typ = "integer" + default: + // NB(directxman12): floats are *NOT* allowed in kubernetes APIs + return "", "", fmt.Errorf("unsupported type %q", basic.String()) + } + + switch basic.Kind() { + case types.Int32, types.Uint32: + format = "int32" + case types.Int64, types.Uint64: + format = "int64" + } + + return typ, format, nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go new file mode 100644 index 000000000..fc12c528c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/schema_visitor.go @@ -0,0 +1,104 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package crd + +import ( + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" +) + +// SchemaVisitor walks the nodes of a schema. +type SchemaVisitor interface { + // Visit is called for each schema node. If it returns a visitor, + // the visitor will be called on each direct child node, and then + // this visitor will be called again with `nil` to indicate that + // all children have been visited. If a nil visitor is returned, + // children are not visited. + Visit(schema *apiext.JSONSchemaProps) SchemaVisitor +} + +// EditSchema walks the given schema using the given visitor. Actual +// pointers to each schema node are passed to the visitor, so any changes +// made by the visitor will be reflected to the passed-in schema. 
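+//
+// A minimal visitor sketch (illustrative only, not part of this package)
+// that clears every description in place:
+//
+//	type descStripper struct{}
+//
+//	func (descStripper) Visit(s *apiext.JSONSchemaProps) SchemaVisitor {
+//		if s != nil {
+//			s.Description = ""
+//		}
+//		return descStripper{}
+//	}
+//
+//	EditSchema(schema, descStripper{})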
+func EditSchema(schema *apiext.JSONSchemaProps, visitor SchemaVisitor) { + walker := schemaWalker{visitor: visitor} + walker.walkSchema(schema) +} + +// schemaWalker knows how to walk the schema, saving modifications +// made by the given visitor. +type schemaWalker struct { + visitor SchemaVisitor +} + +// walkSchema walks the given schema, saving modifications made by the +// visitor (this is as simple as passing a pointer in most cases, +// but special care needs to be taken to persist with maps). +func (w schemaWalker) walkSchema(schema *apiext.JSONSchemaProps) { + subVisitor := w.visitor.Visit(schema) + if subVisitor == nil { + return + } + defer subVisitor.Visit(nil) + + subWalker := schemaWalker{visitor: subVisitor} + if schema.Items != nil { + subWalker.walkPtr(schema.Items.Schema) + subWalker.walkSlice(schema.Items.JSONSchemas) + } + subWalker.walkSlice(schema.AllOf) + subWalker.walkSlice(schema.OneOf) + subWalker.walkSlice(schema.AnyOf) + subWalker.walkPtr(schema.Not) + subWalker.walkMap(schema.Properties) + if schema.AdditionalProperties != nil { + subWalker.walkPtr(schema.AdditionalProperties.Schema) + } + subWalker.walkMap(schema.PatternProperties) + for name, dep := range schema.Dependencies { + subWalker.walkPtr(dep.Schema) + schema.Dependencies[name] = dep + } + if schema.AdditionalItems != nil { + subWalker.walkPtr(schema.AdditionalItems.Schema) + } + subWalker.walkMap(schema.Definitions) +} + +// walkMap walks over values of the given map, saving changes to them. +func (w schemaWalker) walkMap(defs map[string]apiext.JSONSchemaProps) { + for name, def := range defs { + w.walkSchema(&def) + // make sure the edits actually go through since we can't + // take a reference to the value in the map + defs[name] = def + } +} + +// walkSlice walks over items of the given slice. +func (w schemaWalker) walkSlice(defs []apiext.JSONSchemaProps) { + for i := range defs { + w.walkSchema(&defs[i]) + } +} + +// walkPtr walks over the contents of the given pointer, if it's not nil. +func (w schemaWalker) walkPtr(def *apiext.JSONSchemaProps) { + if def == nil { + return + } + w.walkSchema(def) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go new file mode 100644 index 000000000..e00349a41 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/spec.go @@ -0,0 +1,174 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package crd + +import ( + "fmt" + "sort" + "strings" + + "github.com/gobuffalo/flect" + + apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + "sigs.k8s.io/controller-tools/pkg/loader" +) + +// SpecMarker is a marker that knows how to apply itself to a particular +// version in a CRD. +type SpecMarker interface { + // ApplyToCRD applies this marker to the given CRD, in the given version + // within that CRD. It's called after everything else in the CRD is populated. 
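+ //
+ // An illustrative implementer (editor's sketch, not vendored code):
+ //
+ //    type clusterScoped struct{}
+ //
+ //    func (clusterScoped) ApplyToCRD(spec *apiext.CustomResourceDefinitionSpec, version string) error {
+ //        spec.Scope = apiext.ClusterScoped
+ //        return nil
+ //    }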
+ ApplyToCRD(crd *apiext.CustomResourceDefinitionSpec, version string) error +} + +// NeedCRDFor requests the full CRD for the given group-kind. It requires +// that the packages containing the Go structs for that CRD have already +// been loaded with NeedPackage. +func (p *Parser) NeedCRDFor(groupKind schema.GroupKind, maxDescLen *int) { + p.init() + + if _, exists := p.CustomResourceDefinitions[groupKind]; exists { + return + } + + var packages []*loader.Package + for pkg, gv := range p.GroupVersions { + if gv.Group != groupKind.Group { + continue + } + packages = append(packages, pkg) + } + + defaultPlural := flect.Pluralize(strings.ToLower(groupKind.Kind)) + crd := apiext.CustomResourceDefinition{ + TypeMeta: metav1.TypeMeta{ + APIVersion: apiext.SchemeGroupVersion.String(), + Kind: "CustomResourceDefinition", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: defaultPlural + "." + groupKind.Group, + }, + Spec: apiext.CustomResourceDefinitionSpec{ + Group: groupKind.Group, + Names: apiext.CustomResourceDefinitionNames{ + Kind: groupKind.Kind, + ListKind: groupKind.Kind + "List", + Plural: defaultPlural, + Singular: strings.ToLower(groupKind.Kind), + }, + Scope: apiext.NamespaceScoped, + }, + } + + for _, pkg := range packages { + typeIdent := TypeIdent{Package: pkg, Name: groupKind.Kind} + typeInfo := p.Types[typeIdent] + if typeInfo == nil { + continue + } + p.NeedFlattenedSchemaFor(typeIdent) + fullSchema := p.FlattenedSchemata[typeIdent] + fullSchema = *fullSchema.DeepCopy() // don't mutate the cache (we might be truncating description, etc) + if maxDescLen != nil { + TruncateDescription(&fullSchema, *maxDescLen) + } + ver := apiext.CustomResourceDefinitionVersion{ + Name: p.GroupVersions[pkg].Version, + Served: true, + Schema: &apiext.CustomResourceValidation{ + OpenAPIV3Schema: &fullSchema, // fine to take a reference since we deepcopy above + }, + } + crd.Spec.Versions = append(crd.Spec.Versions, ver) + } + + // markers are applied *after* initial generation of objects + for _, pkg := range packages { + typeIdent := TypeIdent{Package: pkg, Name: groupKind.Kind} + typeInfo := p.Types[typeIdent] + if typeInfo == nil { + continue + } + ver := p.GroupVersions[pkg].Version + + for _, markerVals := range typeInfo.Markers { + for _, val := range markerVals { + crdMarker, isCrdMarker := val.(SpecMarker) + if !isCrdMarker { + continue + } + if err := crdMarker.ApplyToCRD(&crd.Spec, ver); err != nil { + pkg.AddError(loader.ErrFromNode(err /* an okay guess */, typeInfo.RawSpec)) + } + } + } + } + + // fix the name if the plural was changed (this is the form the name *has* to take, so no harm in changing it). + crd.Name = crd.Spec.Names.Plural + "." + groupKind.Group + + // nothing to actually write + if len(crd.Spec.Versions) == 0 { + return + } + + // it is necessary to make sure the order of CRD versions in crd.Spec.Versions is stable and explicitly set crd.Spec.Version. + // Otherwise, crd.Spec.Version may point to different CRD versions across different runs. 
+ sort.Slice(crd.Spec.Versions, func(i, j int) bool { return crd.Spec.Versions[i].Name < crd.Spec.Versions[j].Name }) + + // make sure we have *a* storage version + // (default it if we only have one, otherwise, bail) + if len(crd.Spec.Versions) == 1 { + crd.Spec.Versions[0].Storage = true + } + + hasStorage := false + for _, ver := range crd.Spec.Versions { + if ver.Storage { + hasStorage = true + break + } + } + if !hasStorage { + // just add the error to the first relevant package for this CRD, + // since there's no specific error location + packages[0].AddError(fmt.Errorf("CRD for %s has no storage version", groupKind)) + } + + served := false + for _, ver := range crd.Spec.Versions { + if ver.Served { + served = true + break + } + } + if !served { + // just add the error to the first relevant package for this CRD, + // since there's no specific error location + packages[0].AddError(fmt.Errorf("CRD for %s with version(s) %v does not serve any version", groupKind, crd.Spec.Versions)) + } + + // NB(directxman12): CRD's status doesn't have omitempty markers, which means things + // get serialized as null, which causes the validator to freak out. Manually set + // these to empty till we get a better solution. + crd.Status.Conditions = []apiext.CustomResourceDefinitionCondition{} + crd.Status.StoredVersions = []string{} + + p.CustomResourceDefinitions[groupKind] = crd +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go new file mode 100644 index 000000000..c770a3087 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/crd/zz_generated.markerhelp.go @@ -0,0 +1,53 @@ +// +build !ignore_autogenerated + +/* +Copyright2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by helpgen. DO NOT EDIT. + +package crd + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func (Generator) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "generates CustomResourceDefinition objects.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "TrivialVersions": markers.DetailedHelp{ + Summary: "indicates that we should produce a single-version CRD. ", + Details: "Single \"trivial-version\" CRDs are compatible with older (pre 1.13) Kubernetes API servers. The storage version's schema will be used as the CRD's schema. \n Only works with the v1beta1 CRD version.", + }, + "PreserveUnknownFields": markers.DetailedHelp{ + Summary: "indicates whether or not we should turn off pruning. ", + Details: "Left unspecified, it'll default to true when only a v1beta1 CRD is generated (to preserve compatibility with older versions of this tool), or false otherwise. \n It's required to be false for v1 CRDs.", + }, + "MaxDescLen": markers.DetailedHelp{ + Summary: "specifies the maximum description length for fields in CRD's OpenAPI schema. 
", + Details: "0 indicates drop the description for all fields completely. n indicates limit the description to at most n characters and truncate the description to closest sentence boundary if it exceeds n characters.", + }, + "CRDVersions": markers.DetailedHelp{ + Summary: "specifies the target API versions of the CRD type itself to generate. Defaults to v1beta1. ", + Details: "The first version listed will be assumed to be the \"default\" version and will not get a version suffix in the output filename. \n You'll need to use \"v1\" to get support for features like defaulting, along with an API server that supports it (Kubernetes 1.16+).", + }, + }, + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/doc.go new file mode 100644 index 000000000..f4200f2fc --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package deepcopy generates DeepCopy, DeepCopyInto, and DeepCopyObject +// implementations for types. +// +// It's ported from k8s.io/code-generator's / k8s.io/gengo's deepcopy-gen, +// but it's scoped specifically to runtime.Object and skips support for +// deepcopying interfaces, which aren't handled in CRDs anyway. +package deepcopy diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go new file mode 100644 index 000000000..a10c70589 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/gen.go @@ -0,0 +1,300 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package deepcopy + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "io" + "sort" + "strings" + + "sigs.k8s.io/controller-tools/pkg/genall" + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// NB(directxman12): markers.LoadRoots ignores autogenerated code via a build tag +// so any time we check for existing deepcopy functions, we only seen manually written ones. 
+ +const ( + runtimeObjPath = "k8s.io/apimachinery/pkg/runtime.Object" +) + +var ( + enablePkgMarker = markers.Must(markers.MakeDefinition("kubebuilder:object:generate", markers.DescribesPackage, false)) + enableTypeMarker = markers.Must(markers.MakeDefinition("kubebuilder:object:generate", markers.DescribesType, false)) + isObjectMarker = markers.Must(markers.MakeDefinition("kubebuilder:object:root", markers.DescribesType, false)) + + legacyEnablePkgMarker = markers.Must(markers.MakeDefinition("k8s:deepcopy-gen", markers.DescribesPackage, markers.RawArguments(nil))) + legacyEnableTypeMarker = markers.Must(markers.MakeDefinition("k8s:deepcopy-gen", markers.DescribesType, markers.RawArguments(nil))) + legacyIsObjectMarker = markers.Must(markers.MakeDefinition("k8s:deepcopy-gen:interfaces", markers.DescribesType, "")) +) + +// +controllertools:marker:generateHelp + +// Generator generates code containing DeepCopy, DeepCopyInto, and +// DeepCopyObject method implementations. +type Generator struct { + // HeaderFile specifies the header text (e.g. license) to prepend to generated files. + HeaderFile string `marker:",optional"` + // Year specifies the year to substitute for " YEAR" in the header file. + Year string `marker:",optional"` +} + +func (Generator) RegisterMarkers(into *markers.Registry) error { + if err := markers.RegisterAll(into, + enablePkgMarker, legacyEnablePkgMarker, enableTypeMarker, + legacyEnableTypeMarker, isObjectMarker, legacyIsObjectMarker); err != nil { + return err + } + into.AddHelp(enablePkgMarker, + markers.SimpleHelp("object", "enables or disables object interface & deepcopy implementation generation for this package")) + into.AddHelp( + enableTypeMarker, markers.SimpleHelp("object", "overrides enabling or disabling deepcopy generation for this type")) + into.AddHelp(isObjectMarker, + markers.SimpleHelp("object", "enables object interface implementation generation for this type")) + + into.AddHelp(legacyEnablePkgMarker, + markers.DeprecatedHelp(enablePkgMarker.Name, "object", "enables or disables object interface & deepcopy implementation generation for this package")) + into.AddHelp(legacyEnableTypeMarker, + markers.DeprecatedHelp(enableTypeMarker.Name, "object", "overrides enabling or disabling deepcopy generation for this type")) + into.AddHelp(legacyIsObjectMarker, + markers.DeprecatedHelp(isObjectMarker.Name, "object", "enables object interface implementation generation for this type")) + return nil +} + +func enabledOnPackage(col *markers.Collector, pkg *loader.Package) (bool, error) { + pkgMarkers, err := markers.PackageMarkers(col, pkg) + if err != nil { + return false, err + } + pkgMarker := pkgMarkers.Get(enablePkgMarker.Name) + if pkgMarker != nil { + return pkgMarker.(bool), nil + } + legacyMarker := pkgMarkers.Get(legacyEnablePkgMarker.Name) + if legacyMarker != nil { + legacyMarkerVal := string(legacyMarker.(markers.RawArguments)) + firstArg := strings.Split(legacyMarkerVal, ",")[0] + return firstArg == "package", nil + } + + return false, nil +} + +func enabledOnType(allTypes bool, info *markers.TypeInfo) bool { + if typeMarker := info.Markers.Get(enableTypeMarker.Name); typeMarker != nil { + return typeMarker.(bool) + } + legacyMarker := info.Markers.Get(legacyEnableTypeMarker.Name) + if legacyMarker != nil { + legacyMarkerVal := string(legacyMarker.(markers.RawArguments)) + return legacyMarkerVal == "true" + } + return allTypes || genObjectInterface(info) +} + +func genObjectInterface(info *markers.TypeInfo) bool { + objectEnabled := 
info.Markers.Get(isObjectMarker.Name)
+ if objectEnabled != nil {
+ return objectEnabled.(bool)
+ }
+
+ for _, legacyEnabled := range info.Markers[legacyIsObjectMarker.Name] {
+ if legacyEnabled == runtimeObjPath {
+ return true
+ }
+ }
+ return false
+}
+
+func (d Generator) Generate(ctx *genall.GenerationContext) error {
+ var headerText string
+
+ if d.HeaderFile != "" {
+ headerBytes, err := ctx.ReadFile(d.HeaderFile)
+ if err != nil {
+ return err
+ }
+ headerText = string(headerBytes)
+ }
+ headerText = strings.ReplaceAll(headerText, " YEAR", " "+d.Year)
+
+ objGenCtx := ObjectGenCtx{
+ Collector: ctx.Collector,
+ Checker: ctx.Checker,
+ HeaderText: headerText,
+ }
+
+ for _, root := range ctx.Roots {
+ outContents := objGenCtx.GenerateForPackage(root)
+ if outContents == nil {
+ continue
+ }
+
+ writeOut(ctx, root, outContents)
+ }
+
+ return nil
+}
+
+// ObjectGenCtx contains the common info for generating deepcopy implementations.
+// It mostly exists so that generating for a package can be easily tested without
+// requiring a full set of output rules, etc.
+type ObjectGenCtx struct {
+ Collector *markers.Collector
+ Checker *loader.TypeChecker
+ HeaderText string
+}
+
+// writeHeader writes out the build tag, package declaration, and imports.
+func writeHeader(pkg *loader.Package, out io.Writer, packageName string, imports *importsList, headerText string) {
+ // NB(directxman12): blank line after build tags to distinguish them from comments
+ _, err := fmt.Fprintf(out, `// +build !ignore_autogenerated
+
+%[3]s
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package %[1]s
+
+import (
+%[2]s
+)
+
+`, packageName, strings.Join(imports.ImportSpecs(), "\n"), headerText)
+ if err != nil {
+ pkg.AddError(err)
+ }
+
+}
+
+// GenerateForPackage generates DeepCopy and runtime.Object implementations for
+// types in the given package, writing the formatted result to the given writer.
+// May return nil if source could not be generated.
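+//
+// A minimal caller might look like this (editor's illustrative sketch, not
+// vendored code; "collector", "checker", and "root" are assumed to exist):
+//
+//    genCtx := &ObjectGenCtx{Collector: collector, Checker: checker}
+//    if out := genCtx.GenerateForPackage(root); out != nil {
+//        if err := ioutil.WriteFile("zz_generated.deepcopy.go", out, 0644); err != nil {
+//            panic(err)
+//        }
+//    }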
+func (ctx *ObjectGenCtx) GenerateForPackage(root *loader.Package) []byte {
+ allTypes, err := enabledOnPackage(ctx.Collector, root)
+ if err != nil {
+ root.AddError(err)
+ return nil
+ }
+
+ ctx.Checker.Check(root, func(node ast.Node) bool {
+ // ignore interfaces
+ _, isIface := node.(*ast.InterfaceType)
+ return !isIface
+ })
+
+ root.NeedTypesInfo()
+
+ byType := make(map[string][]byte)
+ imports := &importsList{
+ byPath: make(map[string]string),
+ byAlias: make(map[string]string),
+ pkg: root,
+ }
+ // avoid confusing aliases by "reserving" the root package's name as an alias
+ imports.byAlias[root.Name] = ""
+
+ if err := markers.EachType(ctx.Collector, root, func(info *markers.TypeInfo) {
+ outContent := new(bytes.Buffer)
+
+ // copy when enabled for all types and not disabled, or enabled
+ // specifically on this type
+ if !enabledOnType(allTypes, info) {
+ return
+ }
+
+ // avoid copying non-exported types, etc
+ if !shouldBeCopied(root, info) {
+ return
+ }
+
+ copyCtx := &copyMethodMaker{
+ pkg: root,
+ importsList: imports,
+ codeWriter: &codeWriter{out: outContent},
+ }
+
+ copyCtx.GenerateMethodsFor(root, info)
+
+ outBytes := outContent.Bytes()
+ if len(outBytes) > 0 {
+ byType[info.Name] = outBytes
+ }
+ }); err != nil {
+ root.AddError(err)
+ return nil
+ }
+
+ if len(byType) == 0 {
+ return nil
+ }
+
+ outContent := new(bytes.Buffer)
+ writeHeader(root, outContent, root.Name, imports, ctx.HeaderText)
+ writeMethods(root, outContent, byType)
+
+ outBytes := outContent.Bytes()
+ formattedBytes, err := format.Source(outBytes)
+ if err != nil {
+ root.AddError(err)
+ // we still write the invalid source to disk to figure out what went wrong
+ } else {
+ outBytes = formattedBytes
+ }
+
+ return outBytes
+}
+
+// writeMethods writes each method to the file, sorted by type name.
+func writeMethods(pkg *loader.Package, out io.Writer, byType map[string][]byte) {
+ sortedNames := make([]string, 0, len(byType))
+ for name := range byType {
+ sortedNames = append(sortedNames, name)
+ }
+ sort.Strings(sortedNames)
+
+ for _, name := range sortedNames {
+ _, err := out.Write(byType[name])
+ if err != nil {
+ pkg.AddError(err)
+ }
+ }
+}
+
+// writeOut outputs the given code, after gofmt-ing it. If we couldn't gofmt,
+// we write the unformatted code for debugging purposes.
+func writeOut(ctx *genall.GenerationContext, root *loader.Package, outBytes []byte) {
+ outputFile, err := ctx.Open(root, "zz_generated.deepcopy.go")
+ if err != nil {
+ root.AddError(err)
+ return
+ }
+ defer outputFile.Close()
+ n, err := outputFile.Write(outBytes)
+ if err != nil {
+ root.AddError(err)
+ return
+ }
+ if n < len(outBytes) {
+ root.AddError(io.ErrShortWrite)
+ }
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/traverse.go b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/traverse.go
new file mode 100644
index 000000000..1c92e48a0
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/traverse.go
@@ -0,0 +1,812 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deepcopy
+
+import (
+ "fmt"
+ "go/ast"
+ "go/types"
+ "io"
+ "path"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "sigs.k8s.io/controller-tools/pkg/loader"
+ "sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// NB(directxman12): This code is a bit of a byzantine mess.
+// I've tried to clean it up a bit from the original in deepcopy-gen,
+// but parts remain a bit convoluted. Exercise caution when changing.
+// It's perhaps a tad over-commented now, but better safe than sorry.
+// It also seriously needs auditing for sanity -- there's parts where we
+// copy the original deepcopy-gen's output just to be safe, but some of that
+// could be simplified away if we're careful.
+
+// codeWriter assists in writing out Go code lines and blocks to a writer.
+type codeWriter struct {
+ out io.Writer
+}
+
+// Line writes a single line.
+func (c *codeWriter) Line(line string) {
+ fmt.Fprintln(c.out, line)
+}
+
+// Linef writes a single line with formatting (as per fmt.Sprintf).
+func (c *codeWriter) Linef(line string, args ...interface{}) {
+ fmt.Fprintf(c.out, line+"\n", args...)
+}
+
+// If writes an if statement with the given setup/condition clause, executing
+// the given function to write the contents of the block.
+func (c *codeWriter) If(setup string, block func()) {
+ c.Linef("if %s {", setup)
+ block()
+ c.Line("}")
+}
+
+// IfElse writes if and else statements with the given setup/condition clause, executing
+// the given functions to write the contents of the blocks.
+func (c *codeWriter) IfElse(setup string, ifBlock func(), elseBlock func()) {
+ c.Linef("if %s {", setup)
+ ifBlock()
+ c.Line("} else {")
+ elseBlock()
+ c.Line("}")
+}
+
+// For writes a for statement with the given setup/condition clause, executing
+// the given function to write the contents of the block.
+func (c *codeWriter) For(setup string, block func()) {
+ c.Linef("for %s {", setup)
+ block()
+ c.Line("}")
+}
+
+// importsList keeps track of required imports, automatically assigning aliases
+// to import statements.
+type importsList struct {
+ byPath map[string]string
+ byAlias map[string]string
+
+ pkg *loader.Package
+}
+
+// NeedImport marks that the given package is needed in the list of imports,
+// returning the ident (import alias) that should be used to reference the package.
+func (l *importsList) NeedImport(importPath string) string {
+ // we get an actual path from Package, which might include vendored
+ // packages if running on a package in vendor.
+ if ind := strings.LastIndex(importPath, "/vendor/"); ind != -1 {
+ importPath = importPath[ind+8: /* len("/vendor/") */]
+ }
+
+ // check to see if we've already assigned an alias, and just return that.
+ alias, exists := l.byPath[importPath]
+ if exists {
+ return alias
+ }
+
+ // otherwise, calculate an import alias by joining path parts till we get something unique
+ restPath, nextWord := path.Split(importPath)
+
+ for otherPath, exists := "", true; exists && otherPath != importPath; otherPath, exists = l.byAlias[alias] {
+ if restPath == "" {
+ // do something else to disambiguate if we've run out of parts and
+ // still have duplicates, somehow
+ alias += "x"
+ }
+
+ // can't have a first digit, per Go identifier rules, so just skip them
+ for firstRune, runeLen := utf8.DecodeRuneInString(nextWord); unicode.IsDigit(firstRune); firstRune, runeLen = utf8.DecodeRuneInString(nextWord) {
+ nextWord = nextWord[runeLen:]
+ }
+
+ // make a valid identifier by replacing "bad" characters with underscores
+ nextWord = strings.Map(func(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
+ return r
+ }
+ return '_'
+ }, nextWord)
+
+ alias = nextWord + alias
+ if len(restPath) > 0 {
+ restPath, nextWord = path.Split(restPath[:len(restPath)-1] /* chop off final slash */)
+ }
+ }
+
+ l.byPath[importPath] = alias
+ l.byAlias[alias] = importPath
+ return alias
+}
+
+// ImportSpecs returns a string form of each import spec
+// (i.e. `alias "path/to/import"`). Aliases are only present
+// when they don't match the package name.
+func (l *importsList) ImportSpecs() []string {
+ res := make([]string, 0, len(l.byPath))
+ for importPath, alias := range l.byPath {
+ pkg := l.pkg.Imports()[importPath]
+ if pkg != nil && pkg.Name == alias {
+ // don't print if alias is the same as package name
+ // (we've already taken care of duplicates).
+ res = append(res, fmt.Sprintf("%q", importPath))
+ } else {
+ res = append(res, fmt.Sprintf("%s %q", alias, importPath))
+ }
+ }
+ return res
+}
+
+// namingInfo holds package and syntax for referencing a field, type,
+// etc. It's used to allow lazily marking import usage.
+// You should generally retrieve the syntax using Syntax.
+type namingInfo struct {
+ // typeInfo is the type being named.
+ typeInfo types.Type
+ nameOverride string
+}
+
+// Syntax calculates the code representation of the given type or name,
+// and marks that it is used (potentially marking an import as used).
+func (n *namingInfo) Syntax(basePkg *loader.Package, imports *importsList) string {
+ if n.nameOverride != "" {
+ return n.nameOverride
+ }
+
+ // NB(directxman12): typeInfo.String gets us most of the way there,
+ // but fails (for us) on named imports, since it uses the full package path.
+ switch typeInfo := n.typeInfo.(type) {
+ case *types.Named:
+ // register that we need an import for this type,
+ // so we can get the appropriate alias to use.
+ typeName := typeInfo.Obj()
+ otherPkg := typeName.Pkg()
+ if otherPkg == basePkg.Types {
+ // local import
+ return typeName.Name()
+ }
+ alias := imports.NeedImport(loader.NonVendorPath(otherPkg.Path()))
+ return alias + "." 
+ typeName.Name()
+ case *types.Basic:
+ return typeInfo.String()
+ case *types.Pointer:
+ return "*" + (&namingInfo{typeInfo: typeInfo.Elem()}).Syntax(basePkg, imports)
+ case *types.Slice:
+ return "[]" + (&namingInfo{typeInfo: typeInfo.Elem()}).Syntax(basePkg, imports)
+ case *types.Map:
+ return fmt.Sprintf(
+ "map[%s]%s",
+ (&namingInfo{typeInfo: typeInfo.Key()}).Syntax(basePkg, imports),
+ (&namingInfo{typeInfo: typeInfo.Elem()}).Syntax(basePkg, imports))
+ default:
+ basePkg.AddError(fmt.Errorf("name requested for invalid type %s", typeInfo))
+ return typeInfo.String()
+ }
+}
+
+// copyMethodMaker makes DeepCopy (and related) methods for Go types,
+// writing them to its codeWriter.
+type copyMethodMaker struct {
+ pkg *loader.Package
+ *importsList
+ *codeWriter
+}
+
+// GenerateMethodsFor makes DeepCopy, DeepCopyInto, and DeepCopyObject methods
+// for the given type, when appropriate.
+func (c *copyMethodMaker) GenerateMethodsFor(root *loader.Package, info *markers.TypeInfo) {
+ typeInfo := root.TypesInfo.TypeOf(info.RawSpec.Name)
+ if typeInfo == types.Typ[types.Invalid] {
+ root.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %s", info.Name), info.RawSpec))
+ }
+
+ // figure out if we need to use a pointer receiver -- most types get a pointer receiver,
+ // except those that are aliases to types that are already pass-by-reference (pointers,
+ // interfaces, maps, slices).
+ ptrReceiver := usePtrReceiver(typeInfo)
+
+ hasManualDeepCopyInto := hasDeepCopyIntoMethod(root, typeInfo)
+ hasManualDeepCopy, deepCopyOnPtr := hasDeepCopyMethod(root, typeInfo)
+
+ // only generate each method if it hasn't been implemented.
+ if !hasManualDeepCopyInto {
+ c.Line("// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.")
+ if ptrReceiver {
+ c.Linef("func (in *%s) DeepCopyInto(out *%s) {", info.Name, info.Name)
+ } else {
+ c.Linef("func (in %s) DeepCopyInto(out *%s) {", info.Name, info.Name)
+ c.Line("{in := &in") // add an extra block so that we can redefine `in` without type issues
+ }
+
+ // just wrap the existing deepcopy if present
+ if hasManualDeepCopy {
+ if deepCopyOnPtr {
+ c.Line("clone := in.DeepCopy()")
+ c.Line("*out = *clone")
+ } else {
+ c.Line("*out = in.DeepCopy()")
+ }
+ } else {
+ c.genDeepCopyIntoBlock(&namingInfo{nameOverride: info.Name}, typeInfo)
+ }
+
+ if !ptrReceiver {
+ c.Line("}") // close our extra "in redefinition" block
+ }
+ c.Line("}")
+ }
+
+ if !hasManualDeepCopy {
+ // these are both straightforward, so we just template them out.
+ if ptrReceiver {
+ c.Linef(ptrDeepCopy, info.Name)
+ } else {
+ c.Linef(bareDeepCopy, info.Name)
+ }
+
+ // maybe also generate DeepCopyObject, if asked.
+ if genObjectInterface(info) {
+ // we always need runtime.Object for DeepCopyObject
+ runtimeAlias := c.NeedImport("k8s.io/apimachinery/pkg/runtime")
+ if ptrReceiver {
+ c.Linef(ptrDeepCopyObj, info.Name, runtimeAlias)
+ } else {
+ c.Linef(bareDeepCopyObj, info.Name, runtimeAlias)
+ }
+ }
+ }
+}
+
+// genDeepCopyIntoBlock generates a DeepCopyInto block for the given type. The
+// block is *not* wrapped in curly braces.
+func (c *copyMethodMaker) genDeepCopyIntoBlock(actualName *namingInfo, typeInfo types.Type) {
+ // we calculate *how* we should copy mostly based on the "eventual" type of
+ // a given type (i.e. 
the type that results from following all aliases)
+ last := eventualUnderlyingType(typeInfo)
+
+ // we might hit a type that has a manual deepcopy method written on non-root types
+ // (this case is handled for root types in GenerateMethodsFor).
+ // In that case (when we're not dealing with a pointer, since those need special handling
+ // to match 1-to-1 with k8s deepcopy-gen), just use that.
+ if _, isPtr := last.(*types.Pointer); !isPtr && hasAnyDeepCopyMethod(c.pkg, typeInfo) {
+ c.Line("*out = in.DeepCopy()")
+ return
+ }
+
+ switch last := last.(type) {
+ case *types.Basic:
+ // basic types themselves can be "shallow" copied, so all we need
+ // to do is check if our *actual* type (not the underlying one) has
+ // a custom method implemented.
+ if hasMethod, _ := hasDeepCopyMethod(c.pkg, typeInfo); hasMethod {
+ c.Line("*out = in.DeepCopy()")
+ }
+ c.Line("*out = *in")
+ case *types.Map:
+ c.genMapDeepCopy(actualName, last)
+ case *types.Slice:
+ c.genSliceDeepCopy(actualName, last)
+ case *types.Struct:
+ c.genStructDeepCopy(actualName, last)
+ case *types.Pointer:
+ c.genPointerDeepCopy(actualName, last)
+ case *types.Named:
+ // handled via the above loop, should never happen
+ c.pkg.AddError(fmt.Errorf("interface type %s encountered directly, invalid condition", last))
+ default:
+ c.pkg.AddError(fmt.Errorf("invalid type %s", last))
+ }
+}
+
+// genMapDeepCopy generates DeepCopy code for the given named type whose eventual
+// type is the given map type.
+func (c *copyMethodMaker) genMapDeepCopy(actualName *namingInfo, mapType *types.Map) {
+ // maps *must* have shallow-copiable types, since we just iterate
+ // through the keys, only trying to deepcopy the values.
+ if !fineToShallowCopy(mapType.Key()) {
+ c.pkg.AddError(fmt.Errorf("invalid map key type %s", mapType.Key()))
+ return
+ }
+
+ // make our actual type (not the underlying one)...
+ c.Linef("*out = make(%[1]s, len(*in))", actualName.Syntax(c.pkg, c.importsList))
+
+ // ...and copy each element appropriately
+ c.For("key, val := range *in", func() {
+ // check if we have manually written methods,
+ // in which case we'll just try and use those
+ hasDeepCopy, copyOnPtr := hasDeepCopyMethod(c.pkg, mapType.Elem())
+ hasDeepCopyInto := hasDeepCopyIntoMethod(c.pkg, mapType.Elem())
+ switch {
+ case hasDeepCopyInto || hasDeepCopy:
+ // use the manually-written methods
+ _, fieldIsPtr := mapType.Elem().(*types.Pointer) // is "out" actually a pointer
+ inIsPtr := resultWillBePointer(mapType.Elem(), hasDeepCopy, copyOnPtr) // does copying "in" produce a pointer
+ if hasDeepCopy {
+ // If we're calling DeepCopy, check if its receiver needs a pointer
+ inIsPtr = copyOnPtr
+ }
+ if inIsPtr == fieldIsPtr {
+ c.Line("(*out)[key] = val.DeepCopy()")
+ } else if fieldIsPtr {
+ c.Line("{") // use a block because we use `x` as a temporary
+ c.Line("x := val.DeepCopy()")
+ c.Line("(*out)[key] = &x")
+ c.Line("}")
+ } else {
+ c.Line("(*out)[key] = *val.DeepCopy()")
+ }
+ case fineToShallowCopy(mapType.Elem()):
+ // just shallow copy types for which it's safe to do so
+ c.Line("(*out)[key] = val")
+ default:
+ // otherwise, we've got some kind-specific actions,
+ // based on the element's eventual type. 
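+ // (Editor's illustrative gloss: for map[string][]string this emits
+ // "var outVal []string" plus a nil check, then fills outVal via the
+ // slice logic before assigning it back into the map.)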
+ + underlyingElem := eventualUnderlyingType(mapType.Elem()) + + // if it passes by reference, let the main switch handle it + if passesByReference(underlyingElem) { + c.Linef("var outVal %[1]s", (&namingInfo{typeInfo: underlyingElem}).Syntax(c.pkg, c.importsList)) + c.IfElse("val == nil", func() { + c.Line("(*out)[key] = nil") + }, func() { + c.Line("in, out := &val, &outVal") + c.genDeepCopyIntoBlock(&namingInfo{typeInfo: mapType.Elem()}, mapType.Elem()) + }) + c.Line("(*out)[key] = outVal") + + return + } + + // otherwise... + switch underlyingElem := underlyingElem.(type) { + case *types.Struct: + // structs will have deepcopy generated for them, so use that + c.Line("(*out)[key] = *val.DeepCopy()") + default: + c.pkg.AddError(fmt.Errorf("invalid map value type %s", underlyingElem)) + return + } + } + }) +} + +// genSliceDeepCopy generates DeepCopy code for the given named type whose +// underlying type is the given slice. +func (c *copyMethodMaker) genSliceDeepCopy(actualName *namingInfo, sliceType *types.Slice) { + underlyingElem := eventualUnderlyingType(sliceType.Elem()) + + // make the actual type (not the underlying) + c.Linef("*out = make(%[1]s, len(*in))", actualName.Syntax(c.pkg, c.importsList)) + + // check if we need to do anything special, or just copy each element appropriately + switch { + case hasAnyDeepCopyMethod(c.pkg, sliceType.Elem()): + // just use deepcopy if it's present (deepcopyinto will be filled in by our code) + c.For("i := range *in", func() { + c.Line("(*in)[i].DeepCopyInto(&(*out)[i])") + }) + case fineToShallowCopy(underlyingElem): + // shallow copy if ok + c.Line("copy(*out, *in)") + default: + // copy each element appropriately + c.For("i := range *in", func() { + // fall back to normal code for reference types or those with custom logic + if passesByReference(underlyingElem) || hasAnyDeepCopyMethod(c.pkg, sliceType.Elem()) { + c.If("(*in)[i] != nil", func() { + c.Line("in, out := &(*in)[i], &(*out)[i]") + c.genDeepCopyIntoBlock(&namingInfo{typeInfo: sliceType.Elem()}, sliceType.Elem()) + }) + return + } + + switch underlyingElem.(type) { + case *types.Struct: + // structs will always have deepcopy + c.Linef("(*in)[i].DeepCopyInto(&(*out)[i])") + default: + c.pkg.AddError(fmt.Errorf("invalid slice element type %s", underlyingElem)) + } + }) + } +} + +// genStructDeepCopy generates DeepCopy code for the given named type whose +// underlying type is the given struct. +func (c *copyMethodMaker) genStructDeepCopy(_ *namingInfo, structType *types.Struct) { + c.Line("*out = *in") + + for i := 0; i < structType.NumFields(); i++ { + field := structType.Field(i) + + // if we have a manual deepcopy, use that + hasDeepCopy, copyOnPtr := hasDeepCopyMethod(c.pkg, field.Type()) + hasDeepCopyInto := hasDeepCopyIntoMethod(c.pkg, field.Type()) + if hasDeepCopyInto || hasDeepCopy { + // NB(directxman12): yes, I know this is kind-of weird that we + // have all this special-casing here, but it's nice for testing + // purposes to be 1-to-1 with deepcopy-gen, which does all sorts of + // stuff like this (I'm pretty sure I found some codepaths that + // never execute there, because they're pretty clearly invalid + // syntax). 
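+ // (Editor's illustrative gloss: for a field whose type has a hand-written
+ // value-receiver DeepCopy, the code below emits either
+ // "out.F = in.F.DeepCopy()" or "in.F.DeepCopyInto(&out.F)"; "F" is a
+ // hypothetical field name.)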
+
+ _, fieldIsPtr := field.Type().(*types.Pointer)
+ inIsPtr := resultWillBePointer(field.Type(), hasDeepCopy, copyOnPtr)
+ if fieldIsPtr {
+ // we'll need an if block to check for nilness
+ // we'll let genDeepCopyIntoBlock handle the details, we just needed the setup
+ c.If(fmt.Sprintf("in.%s != nil", field.Name()), func() {
+ c.Linef("in, out := &in.%[1]s, &out.%[1]s", field.Name())
+ c.genDeepCopyIntoBlock(&namingInfo{typeInfo: field.Type()}, field.Type())
+ })
+ } else {
+ // special-case for compatibility with deepcopy-gen
+ if inIsPtr == fieldIsPtr {
+ c.Linef("out.%[1]s = in.%[1]s.DeepCopy()", field.Name())
+ } else {
+ c.Linef("in.%[1]s.DeepCopyInto(&out.%[1]s)", field.Name())
+ }
+ }
+ continue
+ }
+
+ // pass-by-reference fields get delegated to the main type
+ underlyingField := eventualUnderlyingType(field.Type())
+ if passesByReference(underlyingField) {
+ c.If(fmt.Sprintf("in.%s != nil", field.Name()), func() {
+ c.Linef("in, out := &in.%[1]s, &out.%[1]s", field.Name())
+ c.genDeepCopyIntoBlock(&namingInfo{typeInfo: field.Type()}, field.Type())
+ })
+ continue
+ }
+
+ // otherwise...
+ switch underlyingField := underlyingField.(type) {
+ case *types.Basic:
+ // nothing to do, initial assignment copied this
+ case *types.Struct:
+ if fineToShallowCopy(field.Type()) {
+ c.Linef("out.%[1]s = in.%[1]s", field.Name())
+ } else {
+ c.Linef("in.%[1]s.DeepCopyInto(&out.%[1]s)", field.Name())
+ }
+ default:
+ c.pkg.AddError(fmt.Errorf("invalid field type %s", underlyingField))
+ return
+ }
+ }
+}
+
+// genPointerDeepCopy generates DeepCopy code for the given named type whose
+// underlying type is the given pointer.
+func (c *copyMethodMaker) genPointerDeepCopy(_ *namingInfo, pointerType *types.Pointer) {
+ underlyingElem := eventualUnderlyingType(pointerType.Elem())
+
+ // if we have a manually written deepcopy, just use that
+ hasDeepCopy, copyOnPtr := hasDeepCopyMethod(c.pkg, pointerType.Elem())
+ hasDeepCopyInto := hasDeepCopyIntoMethod(c.pkg, pointerType.Elem())
+ if hasDeepCopyInto || hasDeepCopy {
+ outNeedsPtr := resultWillBePointer(pointerType.Elem(), hasDeepCopy, copyOnPtr)
+ if hasDeepCopy {
+ outNeedsPtr = copyOnPtr
+ }
+ if outNeedsPtr {
+ c.Line("*out = (*in).DeepCopy()")
+ } else {
+ c.Line("x := (*in).DeepCopy()")
+ c.Line("*out = &x")
+ }
+ return
+ }
+
+ // shallow-copiable types are pretty easy
+ if fineToShallowCopy(underlyingElem) {
+ c.Linef("*out = new(%[1]s)", (&namingInfo{typeInfo: pointerType.Elem()}).Syntax(c.pkg, c.importsList))
+ c.Line("**out = **in")
+ return
+ }
+
+ // pass-by-reference types get delegated to the main switch
+ if passesByReference(underlyingElem) {
+ c.Linef("*out = new(%s)", (&namingInfo{typeInfo: underlyingElem}).Syntax(c.pkg, c.importsList))
+ c.If("**in != nil", func() {
+ c.Line("in, out := *in, *out")
+ c.genDeepCopyIntoBlock(&namingInfo{typeInfo: underlyingElem}, eventualUnderlyingType(underlyingElem))
+ })
+ return
+ }
+
+ // otherwise...
+ switch underlyingElem := underlyingElem.(type) {
+ case *types.Struct:
+ c.Linef("*out = new(%[1]s)", (&namingInfo{typeInfo: pointerType.Elem()}).Syntax(c.pkg, c.importsList))
+ c.Line("(*in).DeepCopyInto(*out)")
+ default:
+ c.pkg.AddError(fmt.Errorf("invalid pointer element type %s", underlyingElem))
+ return
+ }
+}
+
+// usePtrReceiver checks if we need a pointer receiver on methods for the given type.
+// Pass-by-reference types don't get pointer receivers.
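+//
+// Illustrative (hypothetical type names; editor's note, not vendored text):
+//
+//    type ItemList []Item          // value receiver: func (in ItemList) DeepCopyInto(out *ItemList)
+//    type ItemSpec struct{ N int } // pointer receiver: func (in *ItemSpec) DeepCopyInto(out *ItemSpec)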
+func usePtrReceiver(typeInfo types.Type) bool {
+ switch typeInfo.(type) {
+ case *types.Pointer:
+ return false
+ case *types.Map:
+ return false
+ case *types.Slice:
+ return false
+ case *types.Named:
+ return usePtrReceiver(typeInfo.Underlying())
+ default:
+ return true
+ }
+}
+
+func resultWillBePointer(typeInfo types.Type, hasDeepCopy, deepCopyOnPtr bool) bool {
+ // if we have a manual deepcopy, we can just check what that returns
+ if hasDeepCopy {
+ return deepCopyOnPtr
+ }
+
+ // otherwise, we'll need to check its type
+ switch typeInfo := typeInfo.(type) {
+ case *types.Pointer:
+ // NB(directxman12): we don't have to worry about the elem having a deepcopy,
+ // since hasManualDeepCopy would've caught that.
+
+ // we'll be calling on the elem, so check that
+ return resultWillBePointer(typeInfo.Elem(), false, false)
+ case *types.Map:
+ return false
+ case *types.Slice:
+ return false
+ case *types.Named:
+ return resultWillBePointer(typeInfo.Underlying(), false, false)
+ default:
+ return true
+ }
+}
+
+// shouldBeCopied checks if we're supposed to make deepcopy methods for the given type.
+//
+// This is the case if it's exported *and* either:
+// - has a partial manual DeepCopy implementation (in which case we fill in the rest)
+// - aliases to a non-basic type eventually
+// - is a struct
+func shouldBeCopied(pkg *loader.Package, info *markers.TypeInfo) bool {
+ if !ast.IsExported(info.Name) {
+ return false
+ }
+
+ typeInfo := pkg.TypesInfo.TypeOf(info.RawSpec.Name)
+ if typeInfo == types.Typ[types.Invalid] {
+ pkg.AddError(loader.ErrFromNode(fmt.Errorf("unknown type %s", info.Name), info.RawSpec))
+ return false
+ }
+
+ // according to gengo, everything named is an alias, except for an alias to a pointer,
+ // which is just a pointer, afaict. Just roll with it.
+ if asPtr, isPtr := typeInfo.(*types.Named).Underlying().(*types.Pointer); isPtr {
+ typeInfo = asPtr
+ }
+
+ lastType := typeInfo
+ if _, isNamed := typeInfo.(*types.Named); isNamed {
+ // if it has a manual deepcopy or deepcopyinto, we're fine
+ if hasAnyDeepCopyMethod(pkg, typeInfo) {
+ return true
+ }
+
+ for underlyingType := typeInfo.Underlying(); underlyingType != lastType; lastType, underlyingType = underlyingType, underlyingType.Underlying() {
+ // if it has a manual deepcopy or deepcopyinto, we're fine
+ if hasAnyDeepCopyMethod(pkg, underlyingType) {
+ return true
+ }
+
+ // aliases to other things besides basics need copy methods
+ // (basics can be straight-up shallow-copied)
+ if _, isBasic := underlyingType.(*types.Basic); !isBasic {
+ return true
+ }
+ }
+ }
+
+ // structs are the only thing that's not a basic that's copiable by default
+ _, isStruct := lastType.(*types.Struct)
+ return isStruct
+}
+
+// hasDeepCopyMethod checks if this type has a manual DeepCopy method and if
+// the method has a pointer receiver.
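+//
+// Illustratively (editor's note, not vendored text), it reports:
+//
+//    func (in *Foo) DeepCopy() *Foo // (true, true)
+//    func (in Foo) DeepCopy() Foo   // (true, false)
+//
+// and rejects any other parameter or result shape.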
+func hasDeepCopyMethod(pkg *loader.Package, typeInfo types.Type) (bool, bool) {
+ deepCopyMethod, ind, _ := types.LookupFieldOrMethod(typeInfo, true /* check pointers too */, pkg.Types, "DeepCopy")
+ if len(ind) != 1 {
+ // ignore embedded methods
+ return false, false
+ }
+ if deepCopyMethod == nil {
+ return false, false
+ }
+
+ methodSig := deepCopyMethod.Type().(*types.Signature)
+ if methodSig.Params() != nil && methodSig.Params().Len() != 0 {
+ return false, false
+ }
+ if methodSig.Results() == nil || methodSig.Results().Len() != 1 {
+ return false, false
+ }
+
+ recvAsPtr, recvIsPtr := methodSig.Recv().Type().(*types.Pointer)
+ if recvIsPtr {
+ // NB(directxman12): the pointer type returned here isn't comparable even though they
+ // have the same underlying type, for some reason (probably that
+ // LookupFieldOrMethod calls types.NewPointer for us), so check the
+ // underlying values.
+
+ resultPtr, resultIsPtr := methodSig.Results().At(0).Type().(*types.Pointer)
+ if !resultIsPtr {
+ // pointer vs non-pointer are different types
+ return false, false
+ }
+
+ if recvAsPtr.Elem() != resultPtr.Elem() {
+ return false, false
+ }
+ } else if methodSig.Results().At(0).Type() != methodSig.Recv().Type() {
+ return false, false
+ }
+
+ return true, recvIsPtr
+}
+
+// hasDeepCopyIntoMethod checks if this type has a manual DeepCopyInto method.
+func hasDeepCopyIntoMethod(pkg *loader.Package, typeInfo types.Type) bool {
+ deepCopyMethod, ind, _ := types.LookupFieldOrMethod(typeInfo, true /* check pointers too */, pkg.Types, "DeepCopyInto")
+ if len(ind) != 1 {
+ // ignore embedded methods
+ return false
+ }
+ if deepCopyMethod == nil {
+ return false
+ }
+
+ methodSig := deepCopyMethod.Type().(*types.Signature)
+ if methodSig.Params() == nil || methodSig.Params().Len() != 1 {
+ return false
+ }
+ paramPtr, isPtr := methodSig.Params().At(0).Type().(*types.Pointer)
+ if !isPtr {
+ return false
+ }
+ if methodSig.Results() != nil && methodSig.Results().Len() != 0 {
+ return false
+ }
+
+ if recvPtr, recvIsPtr := methodSig.Recv().Type().(*types.Pointer); recvIsPtr {
+ // NB(directxman12): the pointer type returned here isn't comparable even though they
+ // have the same underlying type, for some reason (probably that
+ // LookupFieldOrMethod calls types.NewPointer for us), so check the
+ // underlying values.
+ return paramPtr.Elem() == recvPtr.Elem()
+ }
+ return methodSig.Recv().Type() == paramPtr.Elem()
+}
+
+// hasAnyDeepCopyMethod checks if the given type has DeepCopy or DeepCopyInto
+// (either of which implies the other will exist eventually).
+func hasAnyDeepCopyMethod(pkg *loader.Package, typeInfo types.Type) bool {
+ hasDeepCopy, _ := hasDeepCopyMethod(pkg, typeInfo)
+ return hasDeepCopy || hasDeepCopyIntoMethod(pkg, typeInfo)
+}
+
+// eventualUnderlyingType gets the "final" type in a sequence of named aliases.
+// It's effectively a shortcut for calling Underlying in a loop.
+func eventualUnderlyingType(typeInfo types.Type) types.Type {
+ last := typeInfo
+ for underlying := typeInfo.Underlying(); underlying != last; last, underlying = underlying, underlying.Underlying() {
+ // get the actual underlying type
+ }
+ return last
+}
+
+// fineToShallowCopy checks if shallow-copying a type is equivalent to deepcopy-ing it.
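+//
+// For example (editor's illustrative note): string, int32, and a struct made
+// only of such fields shallow-copy safely, while []string, map[string]int,
+// and *int do not.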
+func fineToShallowCopy(typeInfo types.Type) bool {
+ switch typeInfo := typeInfo.(type) {
+ case *types.Basic:
+ // basic types (int, string, etc) are always fine to shallow-copy
+ return true
+ case *types.Named:
+ // aliases are fine to shallow-copy as long as they resolve to a shallow-copyable type
+ return fineToShallowCopy(typeInfo.Underlying())
+ case *types.Struct:
+ // structs are fine to shallow-copy if they have all shallow-copyable fields
+ for i := 0; i < typeInfo.NumFields(); i++ {
+ field := typeInfo.Field(i)
+ if !fineToShallowCopy(field.Type()) {
+ return false
+ }
+ }
+ return true
+ default:
+ return false
+ }
+}
+
+// passesByReference checks if the given type passes by reference
+// (except for interfaces, which are handled separately).
+func passesByReference(typeInfo types.Type) bool {
+ switch typeInfo.(type) {
+ case *types.Slice:
+ return true
+ case *types.Map:
+ return true
+ case *types.Pointer:
+ return true
+ default:
+ return false
+ }
+}
+
+var (
+ // ptrDeepCopy is a DeepCopy for a type with an existing DeepCopyInto and a pointer receiver.
+ ptrDeepCopy = `
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new %[1]s.
+func (in *%[1]s) DeepCopy() *%[1]s {
+ if in == nil { return nil }
+ out := new(%[1]s)
+ in.DeepCopyInto(out)
+ return out
+}
+`
+
+ // bareDeepCopy is a DeepCopy for a type with an existing DeepCopyInto and a non-pointer receiver.
+ bareDeepCopy = `
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new %[1]s.
+func (in %[1]s) DeepCopy() %[1]s {
+ if in == nil { return nil }
+ out := new(%[1]s)
+ in.DeepCopyInto(out)
+ return *out
+}
+`
+
+ // ptrDeepCopyObj is a DeepCopyObject for a type with an existing DeepCopyInto and a pointer receiver.
+ ptrDeepCopyObj = `
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *%[1]s) DeepCopyObject() %[2]s.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+`
+ // bareDeepCopyObj is a DeepCopyObject for a type with an existing DeepCopyInto and a non-pointer receiver.
+ bareDeepCopyObj = `
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in %[1]s) DeepCopyObject() %[2]s.Object {
+ return in.DeepCopy()
+}
+`
+)
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go
new file mode 100644
index 000000000..a04355836
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/deepcopy/zz_generated.markerhelp.go
@@ -0,0 +1,45 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by helpgen. DO NOT EDIT.
+
+package deepcopy
+
+import (
+ "sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+func (Generator) Help() *markers.DefinitionHelp {
+ return &markers.DefinitionHelp{
+ Category: "",
+ DetailedHelp: markers.DetailedHelp{
+ Summary: "generates code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.",
+ Details: "",
+ },
+ FieldHelp: map[string]markers.DetailedHelp{
+ "HeaderFile": markers.DetailedHelp{
+ Summary: "specifies the header text (e.g. license) to prepend to generated files.",
+ Details: "",
+ },
+ "Year": markers.DetailedHelp{
+ Summary: "specifies the year to substitute for \" YEAR\" in the header file.",
+ Details: "",
+ },
+ },
+ }
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/doc.go
new file mode 100644
index 000000000..df8dd7138
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/doc.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package genall defines entrypoints for generation tools to hook into and
+// share the same set of parsing, typechecking, and marker information.
+//
+// Generators
+//
+// Each Generator knows how to register its markers into a central Registry,
+// and then how to generate output using a Collector and some root packages.
+// Each generator can be considered to be the output type of a marker, for easy
+// command line parsing.
+//
+// Output and Input
+//
+// Generators output artifacts via an OutputRule. OutputRules know how to
+// write output for different package-associated (code) files, as well as
+// config files. Each OutputRule should also be considered to be the output
+// type of a marker, for easy command-line parsing.
+//
+// OutputRules groups together an OutputRule per generator, plus a default
+// output rule for any not explicitly specified.
+//
+// OutputRules are defined for stdout, file writing, and sending to /dev/null
+// (useful for doing "type-checking" without actually saving the results).
+//
+// InputRule defines custom input loading, but it's shared across all
+// Generators. There's currently only a filesystem implementation.
+//
+// Runtime and Context
+//
+// Runtime maps together Generators, and constructs "contexts" which provide
+// the common collector and roots, plus the output rule for that generator, and
+// a handle for reading files (like boilerplate headers).
+//
+// It will run all associated generators, printing errors and automatically
+// skipping type-checking errors (since those are commonly caused by the
+// partial type-checking of loader.TypeChecker).
+//
+// Options
+//
+// The FromOptions (and associated helpers) function makes it easy to use generators
+// and output rules as markers that can be parsed from the command line, producing
+// a registry from command line args. 
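+//
+// A rough end-to-end sketch (editor's illustration, not vendored code;
+// myGenerator is hypothetical):
+//
+//    var gen Generator = myGenerator{}
+//    gens := Generators{&gen}
+//    rt, err := gens.ForRoots("./apis/...")
+//    if err != nil {
+//        panic(err)
+//    }
+//    rt.OutputRules = OutputRules{Default: OutputToDirectory("./out")}
+//    hadErrors := rt.Run()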
+package genall diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go new file mode 100644 index 000000000..9fcc76236 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/genall.go @@ -0,0 +1,178 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genall + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "golang.org/x/tools/go/packages" + "sigs.k8s.io/yaml" + + "sigs.k8s.io/controller-tools/pkg/loader" + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// Generators are a list of Generators. +// NB(directxman12): this is a pointer so that we can uniquely identify each +// instance of a generator, even if it's not hashable. Different *instances* +// of a generator are treated differently. +type Generators []*Generator + +// RegisterMarkers registers all markers defined by each of the Generators in +// this list into the given registry. +func (g Generators) RegisterMarkers(reg *markers.Registry) error { + for _, gen := range g { + if err := (*gen).RegisterMarkers(reg); err != nil { + return err + } + } + return nil +} + +// Generator knows how to register some set of markers, and then produce +// output artifacts based on loaded code containing those markers, +// sharing common loaded data. +type Generator interface { + // RegisterMarkers registers all markers needed by this Generator + // into the given registry. + RegisterMarkers(into *markers.Registry) error + // Generate generates artifacts produced by this marker. + // It's called *after* RegisterMarkers has been called. + Generate(*GenerationContext) error +} + +// HasHelp is some Generator, OutputRule, etc with a help method. +type HasHelp interface { + // Help returns help for this generator. + Help() *markers.DefinitionHelp +} + +// Runtime collects generators, loaded program data (Collector, root Packages), +// and I/O rules, running them together. +type Runtime struct { + // Generators are the Generators to be run by this Runtime. + Generators Generators + // GenerationContext is the base generation context that's copied + // to produce the context for each Generator. + GenerationContext + // OutputRules defines how to output artifacts for each Generator. + OutputRules OutputRules +} + +// GenerationContext defines the common information needed for each Generator +// to run. +type GenerationContext struct { + // Collector is the shared marker collector. + Collector *markers.Collector + // Roots are the base packages to be processed. + Roots []*loader.Package + // Checker is the shared partial type-checker. + Checker *loader.TypeChecker + // OutputRule describes how to output artifacts. + OutputRule + // InputRule describes how to load associated boilerplate artifacts. + // It should *not* be used to load source files. + InputRule +} + +// WriteYAML writes the given objects out, serialized as YAML, using the +// context's OutputRule. 
Objects are written as separate documents, separated
+// from each other by `---` (as per the YAML spec).
+func (g GenerationContext) WriteYAML(itemPath string, objs ...interface{}) error {
+	out, err := g.Open(nil, itemPath)
+	if err != nil {
+		return err
+	}
+	defer out.Close()
+
+	for _, obj := range objs {
+		yamlContent, err := yaml.Marshal(obj)
+		if err != nil {
+			return err
+		}
+		n, err := out.Write(append([]byte("\n---\n"), yamlContent...))
+		if err != nil {
+			return err
+		}
+		if n < len(yamlContent) {
+			return io.ErrShortWrite
+		}
+	}
+
+	return nil
+}
+
+// ReadFile reads the given boilerplate artifact using the context's InputRule.
+func (g GenerationContext) ReadFile(path string) ([]byte, error) {
+	file, err := g.OpenForRead(path)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return ioutil.ReadAll(file)
+}
+
+// ForRoots produces a Runtime to run the given generators against the
+// given packages. It outputs to /dev/null by default.
+func (g Generators) ForRoots(rootPaths ...string) (*Runtime, error) {
+	roots, err := loader.LoadRoots(rootPaths...)
+	if err != nil {
+		return nil, err
+	}
+	rt := &Runtime{
+		Generators: g,
+		GenerationContext: GenerationContext{
+			Collector: &markers.Collector{
+				Registry: &markers.Registry{},
+			},
+			Roots:     roots,
+			InputRule: InputFromFileSystem,
+			Checker:   &loader.TypeChecker{},
+		},
+		OutputRules: OutputRules{Default: OutputToNothing},
+	}
+	if err := rt.Generators.RegisterMarkers(rt.Collector.Registry); err != nil {
+		return nil, err
+	}
+	return rt, nil
+}
+
+// Run runs the Generators in this Runtime against its packages, printing
+// errors (except type errors, which commonly result from using TypeChecker
+// with filters), returning true if errors were found.
+func (r *Runtime) Run() bool {
+	// TODO(directxman12): we could make this parallel,
+	// but we'd need to ensure all underlying machinery is threadsafe
+	if len(r.Generators) == 0 {
+		fmt.Fprintln(os.Stderr, "no generators to run")
+		return true
+	}
+
+	for _, gen := range r.Generators {
+		ctx := r.GenerationContext // make a shallow copy
+		ctx.OutputRule = r.OutputRules.ForGenerator(gen)
+		if err := (*gen).Generate(&ctx); err != nil {
+			fmt.Fprintln(os.Stderr, err)
+		}
+	}
+
+	// skip TypeErrors -- they're probably just from partial typechecking in crd-gen
+	return loader.PrintErrors(r.Roots, packages.TypeError)
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/doc.go
new file mode 100644
index 000000000..d84d1798b
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/doc.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package help contains utilities for actually writing out marker help.
+//
+// Namely, it contains a series of structs (and helpers for producing them)
+// that represent a merged view of marker definition and help that can be used
+// for consumption by the pretty subpackage (for terminal help) or serialized
+// as JSON (e.g. for generating HTML help).
+package help
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/doc.go
new file mode 100644
index 000000000..06622db74
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/doc.go
@@ -0,0 +1,30 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package pretty contains utilities for formatting terminal help output,
+// and a use of those to display marker help.
+//
+// Terminal Output
+//
+// The Span interface and Table struct allow you to construct tables with
+// colored formatting, without causing ANSI formatting characters to mess up width
+// calculations.
+//
+// Marker Help
+//
+// The MarkersSummary prints a summary table for marker help, while the MarkersDetails
+// prints out more detailed information, with explanations of the individual marker fields.
+package pretty
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/help.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/help.go
new file mode 100644
index 000000000..3e34cc0d6
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/help.go
@@ -0,0 +1,171 @@
+package pretty
+
+import (
+	"fmt"
+	"io"
+
+	"sigs.k8s.io/controller-tools/pkg/genall/help"
+
+	"github.com/fatih/color"
+)
+
+var (
+	headingStyle      = Decoration(*color.New(color.Bold, color.Underline))
+	markerNameStyle   = Decoration(*color.New(color.Bold))
+	fieldSummaryStyle = Decoration(*color.New(color.FgGreen, color.Italic))
+	markerTargetStyle = Decoration(*color.New(color.Faint))
+	fieldDetailStyle  = Decoration(*color.New(color.Italic, color.FgGreen))
+	deprecatedStyle   = Decoration(*color.New(color.CrossedOut))
+)
+
+// MarkersSummary returns a condensed summary of help for the given markers.
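+//
+// A rough usage sketch (the markerDocs variable and the os.Stdout destination
+// are illustrative):
+//
+//	span := MarkersSummary("CRD processing", markerDocs)
+//	err := span.WriteTo(os.Stdout)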
+func MarkersSummary(groupName string, markers []help.MarkerDoc) Span { + out := new(SpanWriter) + + out.Print(Text("\n")) + out.Print(headingStyle.Containing(Text(groupName))) + out.Print(Text("\n\n")) + + table := &Table{Sizing: &TableCalculator{Padding: 2}} + for _, marker := range markers { + table.StartRow() + table.Column(MarkerSyntaxHelp(marker)) + table.Column(markerTargetStyle.Containing(Text(marker.Target))) + + summary := new(SpanWriter) + if marker.DeprecatedInFavorOf != nil && len(*marker.DeprecatedInFavorOf) > 0 { + summary.Print(markerNameStyle.Containing(Text("(use "))) + summary.Print(markerNameStyle.Containing(Text(*marker.DeprecatedInFavorOf))) + summary.Print(markerNameStyle.Containing(Text(") "))) + } + summary.Print(Text(marker.Summary)) + table.Column(summary) + + table.EndRow() + } + out.Print(table) + + out.Print(Text("\n")) + + return out +} + +// MarkersDetails returns detailed help for the given markers, including detailed field help. +func MarkersDetails(fullDetail bool, groupName string, markers []help.MarkerDoc) Span { + out := new(SpanWriter) + + out.Print(Line(headingStyle.Containing(Text(groupName)))) + out.Print(Newlines(2)) + + for _, marker := range markers { + out.Print(Line(markerName(marker))) + out.Print(Text(" ")) + out.Print(markerTargetStyle.Containing(Text(marker.Target))) + + summary := new(SpanWriter) + if marker.DeprecatedInFavorOf != nil && len(*marker.DeprecatedInFavorOf) > 0 { + summary.Print(markerNameStyle.Containing(Text("(use "))) + summary.Print(markerNameStyle.Containing(Text(*marker.DeprecatedInFavorOf))) + summary.Print(markerNameStyle.Containing(Text(") "))) + } + summary.Print(Text(marker.Summary)) + + if !marker.AnonymousField() { + out.Print(Indented(1, Line(summary))) + if len(marker.Details) > 0 && fullDetail { + out.Print(Indented(1, Line(Text(marker.Details)))) + } + } + + if marker.AnonymousField() { + out.Print(Indented(1, Line(fieldDetailStyle.Containing(FieldSyntaxHelp(marker.Fields[0]))))) + out.Print(Text(" ")) + out.Print(summary) + if len(marker.Details) > 0 && fullDetail { + out.Print(Indented(2, Line(Text(marker.Details)))) + } + out.Print(Newlines(1)) + } else if !marker.Empty() { + out.Print(Newlines(1)) + if fullDetail { + for _, arg := range marker.Fields { + out.Print(Indented(1, Line(fieldDetailStyle.Containing(FieldSyntaxHelp(arg))))) + out.Print(Indented(2, Line(Text(arg.Summary)))) + if len(arg.Details) > 0 && fullDetail { + out.Print(Indented(2, Line(Text(arg.Details)))) + out.Print(Newlines(1)) + } + } + out.Print(Newlines(1)) + } else { + table := &Table{Sizing: &TableCalculator{Padding: 2}} + for _, arg := range marker.Fields { + table.StartRow() + table.Column(fieldDetailStyle.Containing(FieldSyntaxHelp(arg))) + table.Column(Text(arg.Summary)) + table.EndRow() + } + + out.Print(Indented(1, table)) + } + } else { + out.Print(Newlines(1)) + } + } + + return out +} + +func FieldSyntaxHelp(arg help.FieldHelp) Span { + return fieldSyntaxHelp(arg, "") +} + +// fieldSyntaxHelp prints the syntax help for a particular marker argument. +func fieldSyntaxHelp(arg help.FieldHelp, sep string) Span { + if arg.Optional { + return FromWriter(func(out io.Writer) error { + _, err := fmt.Fprintf(out, "[%s%s=<%s>]", sep, arg.Name, arg.TypeString()) + return err + }) + } + return FromWriter(func(out io.Writer) error { + _, err := fmt.Fprintf(out, "%s%s=<%s>", sep, arg.Name, arg.TypeString()) + return err + }) +} + +// markerName returns a span containing just the appropriately-formatted marker name. 
+func markerName(def help.MarkerDoc) Span { + if def.DeprecatedInFavorOf != nil { + return deprecatedStyle.Containing(Text("+" + def.Name)) + } + return markerNameStyle.Containing(Text("+" + def.Name)) +} + +// MarkerSyntaxHelp assembles syntax help for a given marker. +func MarkerSyntaxHelp(def help.MarkerDoc) Span { + out := new(SpanWriter) + + out.Print(markerName(def)) + + if def.Empty() { + return out + } + + sep := ":" + if def.AnonymousField() { + sep = "" + } + + fieldStyle := fieldSummaryStyle + if def.DeprecatedInFavorOf != nil { + fieldStyle = deprecatedStyle + } + + for _, arg := range def.Fields { + out.Print(fieldStyle.Containing(fieldSyntaxHelp(arg, sep))) + sep = "," + } + + return out +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/print.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/print.go new file mode 100644 index 000000000..8d7452a0b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/print.go @@ -0,0 +1,304 @@ +package pretty + +import ( + "bytes" + "fmt" + "io" + + "github.com/fatih/color" +) + +// NB(directxman12): this isn't particularly elegant, but it's also +// sufficiently simple as to be maintained here. Man (roff) would've +// probably worked, but it's not necessarily on Windows by default. + +// Span is a chunk of content that is writable to an output, but knows how to +// calculate its apparent visual "width" on the terminal (not to be confused +// with the raw length, which may include zero-width coloring sequences). +type Span interface { + // VisualLength reports the "width" as perceived by the user on the terminal + // (i.e. widest line, ignoring ANSI escape characters). + VisualLength() int + // WriteTo writes the full span contents to the given writer. + WriteTo(io.Writer) error +} + +// Table is a Span that writes its data in table form, with sizing controlled +// by the given table calculator. Rows are started with StartRow, followed by +// some calls to Column, followed by a call to EndRow. Once all rows are +// added, the table can be used as a Span. +type Table struct { + Sizing *TableCalculator + + cellsByRow [][]Span + colSizes []int +} + +// StartRow starts a new row. +// It must eventually be followed by EndRow. +func (t *Table) StartRow() { + t.cellsByRow = append(t.cellsByRow, []Span(nil)) +} + +// EndRow ends the currently started row. +func (t *Table) EndRow() { + lastRow := t.cellsByRow[len(t.cellsByRow)-1] + sizes := make([]int, len(lastRow)) + for i, cell := range lastRow { + sizes[i] = cell.VisualLength() + } + t.Sizing.AddRowSizes(sizes...) +} + +// Column adds the given span as a new column to the current row. +func (t *Table) Column(contents Span) { + currentRowInd := len(t.cellsByRow) - 1 + t.cellsByRow[currentRowInd] = append(t.cellsByRow[currentRowInd], contents) +} + +// SkipRow prints a span without having it contribute to the table calculation. 
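+//
+// For reference, a minimal Table usage sketch (cell contents illustrative):
+//
+//	t := &Table{Sizing: &TableCalculator{Padding: 2}}
+//	t.StartRow()
+//	t.Column(Text("name"))
+//	t.Column(Text("value"))
+//	t.EndRow()
+//	err := t.WriteTo(os.Stdout)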
+func (t *Table) SkipRow(contents Span) { + t.cellsByRow = append(t.cellsByRow, []Span{contents}) +} + +func (t *Table) WriteTo(out io.Writer) error { + if t.colSizes == nil { + t.colSizes = t.Sizing.ColumnWidths() + } + + for _, cells := range t.cellsByRow { + currentPosition := 0 + for colInd, cell := range cells { + colSize := t.colSizes[colInd] + diff := colSize - cell.VisualLength() + + if err := cell.WriteTo(out); err != nil { + return err + } + + if diff > 0 { + if err := writePadding(out, columnPadding, diff); err != nil { + return err + } + } + currentPosition += colSize + } + + if _, err := fmt.Fprint(out, "\n"); err != nil { + return err + } + } + + return nil +} + +func (t *Table) VisualLength() int { + if t.colSizes == nil { + t.colSizes = t.Sizing.ColumnWidths() + } + + res := 0 + for _, colSize := range t.colSizes { + res += colSize + } + return res +} + +// Text is a span that simply contains raw text. It's a good starting point. +type Text string + +func (t Text) VisualLength() int { return len(t) } +func (t Text) WriteTo(w io.Writer) error { + _, err := w.Write([]byte(t)) + return err +} + +// indented is a span that indents all lines by the given number of tabs. +type indented struct { + Amount int + Content Span +} + +func (i *indented) VisualLength() int { return i.Content.VisualLength() } +func (i *indented) WriteTo(w io.Writer) error { + var out bytes.Buffer + if err := i.Content.WriteTo(&out); err != nil { + return err + } + + lines := bytes.Split(out.Bytes(), []byte("\n")) + for lineInd, line := range lines { + if lineInd != 0 { + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + } + if len(line) == 0 { + continue + } + + if err := writePadding(w, indentPadding, i.Amount); err != nil { + return err + } + if _, err := w.Write(line); err != nil { + return err + } + } + return nil +} + +// Indented returns a span that indents all lines by the given number of tabs. +func Indented(amt int, content Span) Span { + return &indented{Amount: amt, Content: content} +} + +// fromWriter is a span that takes content from a function expecting a Writer. +type fromWriter struct { + cache []byte + cacheError error + run func(io.Writer) error +} + +func (f *fromWriter) VisualLength() int { + if f.cache == nil { + var buf bytes.Buffer + if err := f.run(&buf); err != nil { + f.cacheError = err + } + f.cache = buf.Bytes() + } + return len(f.cache) +} +func (f *fromWriter) WriteTo(w io.Writer) error { + if f.cache != nil { + if f.cacheError != nil { + return f.cacheError + } + _, err := w.Write(f.cache) + return err + } + return f.run(w) +} + +// FromWriter returns a span that takes content from a function expecting a Writer. +func FromWriter(run func(io.Writer) error) Span { + return &fromWriter{run: run} +} + +// Decoration represents a terminal decoration. +type Decoration color.Color + +// Containing returns a Span that has the given decoration applied. +func (d Decoration) Containing(contents Span) Span { + return &decorated{ + Contents: contents, + Attributes: color.Color(d), + } +} + +// decorated is a span that has some terminal decoration applied. 
+type decorated struct { + Contents Span + Attributes color.Color +} + +func (d *decorated) VisualLength() int { return d.Contents.VisualLength() } +func (d *decorated) WriteTo(w io.Writer) error { + oldOut := color.Output + color.Output = w + defer func() { color.Output = oldOut }() + + d.Attributes.Set() + defer color.Unset() + + return d.Contents.WriteTo(w) +} + +// SpanWriter is a span that contains multiple sub-spans. +type SpanWriter struct { + contents []Span +} + +func (m *SpanWriter) VisualLength() int { + res := 0 + for _, span := range m.contents { + res += span.VisualLength() + } + return res +} +func (m *SpanWriter) WriteTo(w io.Writer) error { + for _, span := range m.contents { + if err := span.WriteTo(w); err != nil { + return err + } + } + return nil +} + +// Print adds a new span to this SpanWriter. +func (m *SpanWriter) Print(s Span) { + m.contents = append(m.contents, s) +} + +// lines is a span that adds some newlines, optionally followed by some content. +type lines struct { + content Span + amountBefore int +} + +func (l *lines) VisualLength() int { + if l.content == nil { + return 0 + } + return l.content.VisualLength() +} +func (l *lines) WriteTo(w io.Writer) error { + if err := writePadding(w, linesPadding, l.amountBefore); err != nil { + return err + } + if l.content != nil { + if err := l.content.WriteTo(w); err != nil { + return err + } + } + return nil +} + +// Newlines returns a span just containing some newlines. +func Newlines(amt int) Span { + return &lines{amountBefore: amt} +} + +// Line returns a span that emits a newline, followed by the given content. +func Line(content Span) Span { + return &lines{amountBefore: 1, content: content} +} + +var ( + columnPadding = []byte(" ") + indentPadding = []byte("\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t") + linesPadding = []byte("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n") +) + +// writePadding writes out padding of the given type in the given amount to the writer. +// Each byte in the padding buffer contributes 1 to the amount -- the padding being +// a buffer is just for efficiency. +func writePadding(out io.Writer, typ []byte, amt int) error { + if amt <= len(typ) { + _, err := out.Write(typ[:amt]) + return err + } + + num := amt / len(typ) + rem := amt % len(typ) + for i := 0; i < num; i++ { + if _, err := out.Write(typ); err != nil { + return err + } + } + + if _, err := out.Write(typ[:rem]); err != nil { + return err + } + return nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/table.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/table.go new file mode 100644 index 000000000..5a0b4752a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/pretty/table.go @@ -0,0 +1,64 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pretty + +// TableCalculator calculates column widths (with optional padding) +// for a table based on the maximum required column width. 
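+//
+// For example (row sizes illustrative):
+//
+//	calc := &TableCalculator{Padding: 2}
+//	calc.AddRowSizes(4, 10)
+//	calc.AddRowSizes(7, 3)
+//	widths := calc.ColumnWidths() // yields [9 12]: per-column max plus padding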
+type TableCalculator struct { + cellSizesByCol [][]int + + Padding int + MaxWidth int +} + +// AddRowSizes registers a new row with cells of the given sizes. +func (c *TableCalculator) AddRowSizes(cellSizes ...int) { + if len(cellSizes) > len(c.cellSizesByCol) { + for range cellSizes[len(c.cellSizesByCol):] { + c.cellSizesByCol = append(c.cellSizesByCol, []int(nil)) + } + } + for i, size := range cellSizes { + c.cellSizesByCol[i] = append(c.cellSizesByCol[i], size) + } +} + +// ColumnWidths calculates the appropriate column sizes given the +// previously registered rows. +func (c *TableCalculator) ColumnWidths() []int { + maxColWidths := make([]int, len(c.cellSizesByCol)) + + for colInd, cellSizes := range c.cellSizesByCol { + max := 0 + for _, cellSize := range cellSizes { + if max < cellSize { + max = cellSize + } + } + maxColWidths[colInd] = max + } + + actualMaxWidth := c.MaxWidth - c.Padding + for i, width := range maxColWidths { + if actualMaxWidth > 0 && width > actualMaxWidth { + maxColWidths[i] = actualMaxWidth + } + maxColWidths[i] += c.Padding + } + + return maxColWidths +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/sort.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/sort.go new file mode 100644 index 000000000..53c923e34 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/sort.go @@ -0,0 +1,106 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package help + +import ( + "strings" + + "sigs.k8s.io/controller-tools/pkg/markers" +) + +// SortGroup knows how to sort and group marker definitions. +type SortGroup interface { + // Less is equivalent to the Less function from sort, and is used to sort the markers. + Less(*markers.Definition, *markers.Definition) bool + // Group returns the "group" that a given marker belongs to. + Group(*markers.Definition, *markers.DefinitionHelp) string +} + +var ( + // SortByCategory sorts the markers by name and groups them by their help category. + SortByCategory = sortByCategory{} + + // SortByOption sorts by the generator that the option belongs to. 
+	SortByOption = optionsSort{}
+)
+
+type sortByCategory struct{}
+
+func (sortByCategory) Group(_ *markers.Definition, help *markers.DefinitionHelp) string {
+	if help == nil {
+		return ""
+	}
+	return help.Category
+}
+func (sortByCategory) Less(i, j *markers.Definition) bool {
+	return i.Name < j.Name
+}
+
+type optionsSort struct{}
+
+func (optionsSort) Less(i, j *markers.Definition) bool {
+	iParts := strings.Split(i.Name, ":")
+	jParts := strings.Split(j.Name, ":")
+
+	iGen := ""
+	iRule := ""
+	jGen := ""
+	jRule := ""
+
+	switch len(iParts) {
+	case 1:
+		iGen = iParts[0]
+	// two means a default output rule, so ignore
+	case 2:
+		iRule = iParts[1]
+	case 3:
+		iGen = iParts[1]
+		iRule = iParts[2]
+	}
+	switch len(jParts) {
+	case 1:
+		jGen = jParts[0]
+	// two means a default output rule, so ignore
+	case 2:
+		jRule = jParts[1]
+	case 3:
+		jGen = jParts[1]
+		jRule = jParts[2]
+	}
+
+	if iGen != jGen {
+		return iGen > jGen
+	}
+
+	return iRule < jRule
+}
+func (optionsSort) Group(def *markers.Definition, _ *markers.DefinitionHelp) string {
+	parts := strings.Split(def.Name, ":")
+
+	switch len(parts) {
+	case 1:
+		if parts[0] == "paths" {
+			return "generic"
+		}
+		return "generators"
+	case 2:
+		return "output rules (optionally as output:<generator>:...)"
+	default:
+		return ""
+		// three means a marker-specific output rule, ignore
+	}
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/types.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/types.go
new file mode 100644
index 000000000..be1110432
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/help/types.go
@@ -0,0 +1,215 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package help
+
+import (
+	"sort"
+	"strings"
+
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+// DetailedHelp contains both a summary and further details.
+type DetailedHelp struct {
+	// Summary contains a one-line description.
+	Summary string `json:"summary"`
+	// Details contains further information.
+	Details string `json:"details,omitempty"`
+}
+
+// Argument is the type data for a marker argument.
+type Argument struct {
+	// Type is the data type of the argument (string, bool, int, slice, any, raw, invalid)
+	Type string `json:"type"`
+	// Optional marks this argument as optional.
+	Optional bool `json:"optional"`
+	// ItemType contains the type of the slice item, if this is a slice
+	ItemType *Argument `json:"itemType,omitempty"`
+}
+
+func (a Argument) typeString(out *strings.Builder) {
+	if a.Type == "slice" {
+		out.WriteString("[]")
+		a.ItemType.typeString(out)
+		return
+	}
+
+	out.WriteString(a.Type)
+}
+
+// TypeString returns a string roughly equivalent
+// (but not identical) to the underlying Go type that
+// this argument would parse to. It's mainly useful
+// for user-friendly formatting of this argument (e.g.
+// help strings).
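+//
+// For example (a hypothetical slice-of-string argument):
+//
+//	arg := Argument{Type: "slice", ItemType: &Argument{Type: "string"}}
+//	arg.TypeString() // returns "[]string"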
+func (a Argument) TypeString() string {
+	out := &strings.Builder{}
+	a.typeString(out)
+	return out.String()
+}
+
+// FieldHelp contains information required to print documentation for a marker field.
+type FieldHelp struct {
+	// Name is the field name.
+	Name string `json:"name"`
+	// Argument is the type of the field.
+	Argument `json:",inline"`
+
+	// DetailedHelp contains the textual help for the field.
+	DetailedHelp `json:",inline"`
+}
+
+// MarkerDoc contains information required to print documentation for a marker.
+type MarkerDoc struct {
+	// definition
+
+	// Name is the name of the marker.
+	Name string `json:"name"`
+	// Target is the target (field, package, type) of the marker.
+	Target string `json:"target"`
+
+	// help
+
+	// DetailedHelp is the textual help for the marker.
+	DetailedHelp `json:",inline"`
+	// Category is the general "category" that this marker belongs to.
+	Category string `json:"category"`
+	// DeprecatedInFavorOf marks that this marker shouldn't be used when
+	// non-nil. If also non-empty, another marker should be used instead.
+	DeprecatedInFavorOf *string `json:"deprecatedInFavorOf,omitempty"`
+	// Fields is the type and help data for each field of this marker.
+	Fields []FieldHelp `json:"fields,omitempty"`
+}
+
+// Empty checks if this marker has any arguments, returning true if not.
+func (m MarkerDoc) Empty() bool {
+	return len(m.Fields) == 0
+}
+
+// AnonymousField checks if this is a single-valued marker
+// (as opposed to having named fields).
+func (m MarkerDoc) AnonymousField() bool {
+	return len(m.Fields) == 1 && m.Fields[0].Name == ""
+}
+
+// ForArgument returns the equivalent documentation for a marker argument.
+func ForArgument(argRaw markers.Argument) Argument {
+	res := Argument{
+		Optional: argRaw.Optional,
+	}
+
+	if argRaw.ItemType != nil {
+		itemType := ForArgument(*argRaw.ItemType)
+		res.ItemType = &itemType
+	}
+
+	switch argRaw.Type {
+	case markers.IntType:
+		res.Type = "int"
+	case markers.StringType:
+		res.Type = "string"
+	case markers.BoolType:
+		res.Type = "bool"
+	case markers.AnyType:
+		res.Type = "any"
+	case markers.SliceType:
+		res.Type = "slice"
+	case markers.RawType:
+		res.Type = "raw"
+	case markers.InvalidType:
+		res.Type = "invalid"
+	}
+
+	return res
+}
+
+// ForDefinition returns the equivalent marker documentation for a given marker definition and separate help.
+func ForDefinition(defn *markers.Definition, maybeHelp *markers.DefinitionHelp) MarkerDoc {
+	var help markers.DefinitionHelp
+	if maybeHelp != nil {
+		help = *maybeHelp
+	}
+
+	res := MarkerDoc{
+		Name:                defn.Name,
+		Category:            help.Category,
+		DeprecatedInFavorOf: help.DeprecatedInFavorOf,
+		Target:              defn.Target.String(),
+		DetailedHelp:        DetailedHelp{Summary: help.Summary, Details: help.Details},
+	}
+
+	helpByField := help.FieldsHelp(defn)
+
+	// TODO(directxman12): deterministic ordering
+	for fieldName, fieldHelpRaw := range helpByField {
+		fieldInfo := defn.Fields[fieldName]
+		fieldHelp := FieldHelp{
+			Name:         fieldName,
+			DetailedHelp: DetailedHelp{Summary: fieldHelpRaw.Summary, Details: fieldHelpRaw.Details},
+			Argument:     ForArgument(fieldInfo),
+		}
+
+		res.Fields = append(res.Fields, fieldHelp)
+	}
+
+	sort.Slice(res.Fields, func(i, j int) bool { return res.Fields[i].Name < res.Fields[j].Name })
+
+	return res
+}
+
+// CategoryDoc contains help information for all markers in a Category.
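+//
+// Serialized as JSON, it looks roughly like (values illustrative):
+//
+//	{"category": "object", "markers": [{"name": "kubebuilder:object:root", ...}]}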
+type CategoryDoc struct { + Category string `json:"category"` + Markers []MarkerDoc `json:"markers"` +} + +// ByCategory returns the marker help for markers in the given +// registry, grouped and sorted according to the given method. +func ByCategory(reg *markers.Registry, sorter SortGroup) []CategoryDoc { + groupedMarkers := make(map[string][]*markers.Definition) + + for _, marker := range reg.AllDefinitions() { + group := sorter.Group(marker, reg.HelpFor(marker)) + groupedMarkers[group] = append(groupedMarkers[group], marker) + } + allGroups := make([]string, 0, len(groupedMarkers)) + for groupName := range groupedMarkers { + allGroups = append(allGroups, groupName) + } + + sort.Strings(allGroups) + + res := make([]CategoryDoc, len(allGroups)) + for i, groupName := range allGroups { + markers := groupedMarkers[groupName] + sort.Slice(markers, func(i, j int) bool { + return sorter.Less(markers[i], markers[j]) + }) + + markerDocs := make([]MarkerDoc, len(markers)) + for i, marker := range markers { + markerDocs[i] = ForDefinition(marker, reg.HelpFor(marker)) + } + + res[i] = CategoryDoc{ + Category: groupName, + Markers: markerDocs, + } + } + + return res +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/input.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/input.go new file mode 100644 index 000000000..46e191c0c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/input.go @@ -0,0 +1,37 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genall + +import ( + "io" + "os" +) + +// InputRule describes how to load non-code boilerplate artifacts. +// It's not used for loading code. +type InputRule interface { + // OpenForRead opens the given non-code artifact for reading. + OpenForRead(path string) (io.ReadCloser, error) +} +type inputFromFileSystem struct{} + +func (inputFromFileSystem) OpenForRead(path string) (io.ReadCloser, error) { + return os.Open(path) +} + +// InputFromFileSystem reads from the filesystem as normal. +var InputFromFileSystem = inputFromFileSystem{} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go new file mode 100644 index 000000000..45e3bc41f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/options.go @@ -0,0 +1,192 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package genall
+
+import (
+	"fmt"
+	"strings"
+
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+var (
+	InputPathsMarker = markers.Must(markers.MakeDefinition("paths", markers.DescribesPackage, InputPaths(nil)))
+)
+
+// +controllertools:marker:generateHelp:category=""
+
+// InputPaths represents paths and go-style path patterns to use as package roots.
+type InputPaths []string
+
+// RegisterOptionsMarkers registers "mandatory" options markers for FromOptions into the given registry.
+// At this point, that's just InputPaths.
+func RegisterOptionsMarkers(into *markers.Registry) error {
+	if err := into.Register(InputPathsMarker); err != nil {
+		return err
+	}
+	// NB(directxman12): we make this optional so we don't have a bootstrap problem with helpgen
+	if helpGiver, hasHelp := ((interface{})(InputPaths(nil))).(HasHelp); hasHelp {
+		into.AddHelp(InputPathsMarker, helpGiver.Help())
+	}
+	return nil
+}
+
+// RegistryFromOptions produces just the marker registry that would be used by FromOptions, without
+// attempting to produce a full Runtime. This can be useful if you want to display help without
+// trying to load roots.
+func RegistryFromOptions(optionsRegistry *markers.Registry, options []string) (*markers.Registry, error) {
+	protoRt, err := protoFromOptions(optionsRegistry, options)
+	if err != nil {
+		return nil, err
+	}
+	reg := &markers.Registry{}
+	if err := protoRt.Generators.RegisterMarkers(reg); err != nil {
+		return nil, err
+	}
+	return reg, nil
+}
+
+// FromOptions parses the options from markers stored in the given registry out into a runtime.
+// The markers in the registry must be either
+//
+// a) Generators
+// b) OutputRules
+// c) InputPaths
+//
+// The paths specified in InputPaths are loaded as package roots, and then combined with
+// the generators and the specified output rules to produce a runtime that can be run or
+// further modified. No default generators are used if none are specified -- you can check
+// the output and rerun for that.
+func FromOptions(optionsRegistry *markers.Registry, options []string) (*Runtime, error) {
+	protoRt, err := protoFromOptions(optionsRegistry, options)
+	if err != nil {
+		return nil, err
+	}
+
+	// make the runtime
+	genRuntime, err := protoRt.Generators.ForRoots(protoRt.Paths...)
+	if err != nil {
+		return nil, err
+	}
+
+	// attempt to figure out what the user wants without a lot of verbose specificity:
+	// if the user specifies a default rule, assume that they probably want to fall back
+	// to that. Otherwise, assume that they just wanted to customize one option from the
+	// set, and leave the rest in the standard configuration.
+	if protoRt.OutputRules.Default != nil {
+		genRuntime.OutputRules = protoRt.OutputRules
+		return genRuntime, nil
+	}
+
+	outRules := DirectoryPerGenerator("config", protoRt.GeneratorsByName)
+	for gen, rule := range protoRt.OutputRules.ByGenerator {
+		outRules.ByGenerator[gen] = rule
+	}
+
+	genRuntime.OutputRules = outRules
+	return genRuntime, nil
+}
+
+// protoFromOptions returns a proto-Runtime from the given options registry and
+// options set. This can then be used to construct an actual Runtime. See the
+// FromOptions function for more details about how the options work.
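+//
+// The option strings are the same ones accepted on a generator command line,
+// e.g. (the generator, rule, and paths shown are illustrative):
+//
+//	protoRt, err := protoFromOptions(optionsRegistry, []string{
+//		"crd",                   // a generator
+//		"output:crd:dir=./crds", // an output rule scoped to that generator
+//		"paths=./pkg/...",       // input paths
+//	})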
+func protoFromOptions(optionsRegistry *markers.Registry, options []string) (protoRuntime, error) {
+	var gens Generators
+	rules := OutputRules{
+		ByGenerator: make(map[*Generator]OutputRule),
+	}
+	var paths []string
+
+	// collect the generators first, so that we can key the output on the actual
+	// generator, which matters if there's settings in the gen object and it's not a pointer.
+	outputByGen := make(map[string]OutputRule)
+	gensByName := make(map[string]*Generator)
+
+	for _, rawOpt := range options {
+		if rawOpt[0] != '+' {
+			rawOpt = "+" + rawOpt // add a `+` to make it acceptable for usage with the registry
+		}
+		defn := optionsRegistry.Lookup(rawOpt, markers.DescribesPackage)
+		if defn == nil {
+			return protoRuntime{}, fmt.Errorf("unknown option %q", rawOpt[1:])
+		}
+
+		val, err := defn.Parse(rawOpt)
+		if err != nil {
+			return protoRuntime{}, fmt.Errorf("unable to parse option %q: %w", rawOpt[1:], err)
+		}
+
+		switch val := val.(type) {
+		case Generator:
+			gens = append(gens, &val)
+			gensByName[defn.Name] = &val
+		case OutputRule:
+			_, genName := splitOutputRuleOption(defn.Name)
+			if genName == "" {
+				// it's a default rule
+				rules.Default = val
+				continue
+			}
+
+			outputByGen[genName] = val
+			continue
+		case InputPaths:
+			paths = append(paths, val...)
+		default:
+			return protoRuntime{}, fmt.Errorf("unknown option marker %q", defn.Name)
+		}
+	}
+
+	// actually associate the rules now that we know the generators
+	for genName, outputRule := range outputByGen {
+		gen, knownGen := gensByName[genName]
+		if !knownGen {
+			return protoRuntime{}, fmt.Errorf("non-invoked generator %q", genName)
+		}
+
+		rules.ByGenerator[gen] = outputRule
+	}
+
+	return protoRuntime{
+		Paths:            paths,
+		Generators:       Generators(gens),
+		OutputRules:      rules,
+		GeneratorsByName: gensByName,
+	}, nil
+}
+
+// protoRuntime represents the raw pieces needed to compose a runtime, as
+// parsed from some options.
+type protoRuntime struct {
+	Paths            []string
+	Generators       Generators
+	OutputRules      OutputRules
+	GeneratorsByName map[string]*Generator
+}
+
+// splitOutputRuleOption splits a marker name of "output:<generator>:<rule>" or
+// "output:<rule>" into its component rule and generator name.
+func splitOutputRuleOption(name string) (ruleName string, genName string) {
+	parts := strings.SplitN(name, ":", 3)
+	if len(parts) == 3 {
+		// output:<generator>:<rule>
+		return parts[2], parts[1]
+	}
+	// output:<rule>
+	return parts[1], ""
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go
new file mode 100644
index 000000000..b5a09e169
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/output.go
@@ -0,0 +1,160 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package genall
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// nopCloser is a WriteCloser whose Close
+// is a no-op.
+type nopCloser struct {
+	io.Writer
+}
+
+func (n nopCloser) Close() error {
+	return nil
+}
+
+// DirectoryPerGenerator produces output rules mapping output to a different subdirectory
+// of the given base directory for each generator (with each subdirectory specified as
+// the key in the input map).
+func DirectoryPerGenerator(base string, generators map[string]*Generator) OutputRules {
+	rules := OutputRules{
+		Default:     OutputArtifacts{Config: OutputToDirectory(base)},
+		ByGenerator: make(map[*Generator]OutputRule, len(generators)),
+	}
+
+	for name, gen := range generators {
+		rules.ByGenerator[gen] = OutputArtifacts{
+			Config: OutputToDirectory(filepath.Join(base, name)),
+		}
+	}
+
+	return rules
+}
+
+// OutputRules defines how to output artifacts on a per-generator basis.
+type OutputRules struct {
+	// Default is the output rule used when no specific per-generator overrides match.
+	Default OutputRule
+	// ByGenerator contains specific per-generator overrides.
+	// NB(directxman12): this is a pointer to avoid issues if a given Generator becomes unhashable
+	// (interface values compare by "dereferencing" their internal pointer first, whereas pointers
+	// compare by the actual pointer itself).
+	ByGenerator map[*Generator]OutputRule
+}
+
+// ForGenerator returns the output rule that should be used
+// by the given Generator.
+func (o OutputRules) ForGenerator(gen *Generator) OutputRule {
+	if forGen, specific := o.ByGenerator[gen]; specific {
+		return forGen
+	}
+	return o.Default
+}
+
+// OutputRule defines how to output artifacts from a generator.
+type OutputRule interface {
+	// Open opens the given artifact path for writing. If a package is passed,
+	// the artifact is considered to be used as part of the package (e.g.
+	// generated code), while a nil package indicates that the artifact is
+	// config (or something else not involved in Go compilation).
+	Open(pkg *loader.Package, path string) (io.WriteCloser, error)
+}
+
+// OutputToNothing skips outputting anything.
+var OutputToNothing = outputToNothing{}
+
+// +controllertools:marker:generateHelp:category=""
+
+// outputToNothing skips outputting anything.
+type outputToNothing struct{}
+
+func (o outputToNothing) Open(_ *loader.Package, _ string) (io.WriteCloser, error) {
+	return nopCloser{ioutil.Discard}, nil
+}
+
+// +controllertools:marker:generateHelp:category=""
+
+// OutputToDirectory outputs each artifact to the given directory, regardless
+// of whether it's package-associated or not.
+type OutputToDirectory string
+
+func (o OutputToDirectory) Open(_ *loader.Package, itemPath string) (io.WriteCloser, error) {
+	// ensure the directory exists
+	if err := os.MkdirAll(string(o), os.ModePerm); err != nil {
+		return nil, err
+	}
+	path := filepath.Join(string(o), itemPath)
+	return os.Create(path)
+}
+
+// OutputToStdout outputs everything to standard-out, with no separation.
+//
+// Generally useful for single-artifact outputs.
+var OutputToStdout = outputToStdout{}
+
+// +controllertools:marker:generateHelp:category=""
+
+// outputToStdout outputs everything to standard-out, with no separation.
+//
+// Generally useful for single-artifact outputs.
+type outputToStdout struct{}
+
+func (o outputToStdout) Open(_ *loader.Package, itemPath string) (io.WriteCloser, error) {
+	return nopCloser{os.Stdout}, nil
+}
+
+// +controllertools:marker:generateHelp:category=""
+
+// OutputArtifacts outputs artifacts to different locations, depending on
+// whether they're package-associated or not.
+//
+// Non-package associated artifacts
+// are output to the Config directory, while package-associated ones are output
+// to their package's source files' directory, unless an alternate path is
+// specified in Code.
+type OutputArtifacts struct {
+	// Config points to the directory to which to write configuration.
+	Config OutputToDirectory
+	// Code overrides the directory in which to write new code (defaults to where the existing code lives).
+	Code OutputToDirectory `marker:",optional"`
+}
+
+func (o OutputArtifacts) Open(pkg *loader.Package, itemPath string) (io.WriteCloser, error) {
+	if pkg == nil {
+		return o.Config.Open(pkg, itemPath)
+	}
+
+	if o.Code != "" {
+		return o.Code.Open(pkg, itemPath)
+	}
+
+	if len(pkg.CompiledGoFiles) == 0 {
+		return nil, fmt.Errorf("cannot output to a package with no path on disk")
+	}
+	outDir := filepath.Dir(pkg.CompiledGoFiles[0])
+	outPath := filepath.Join(outDir, itemPath)
+	return os.Create(outPath)
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go
new file mode 100644
index 000000000..824d3af3f
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/genall/zz_generated.markerhelp.go
@@ -0,0 +1,89 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by helpgen. DO NOT EDIT.
+
+package genall
+
+import (
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+func (InputPaths) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "represents paths and go-style path patterns to use as package roots.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
+
+func (OutputArtifacts) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "outputs artifacts to different locations, depending on whether they're package-associated or not. ",
", + Details: "Non-package associated artifacts are output to the Config directory, while package-associated ones are output to their package's source files' directory, unless an alternate path is specified in Code.", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "Config": markers.DetailedHelp{ + Summary: "points to the directory to which to write configuration.", + Details: "", + }, + "Code": markers.DetailedHelp{ + Summary: "overrides the directory in which to write new code (defaults to where the existing code lives).", + Details: "", + }, + }, + } +} + +func (OutputToDirectory) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "outputs each artifact to the given directory, regardless of if it's package-associated or not.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (outputToNothing) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "skips outputting anything.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} + +func (outputToStdout) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "outputs everything to standard-out, with no separation. ", + Details: "Generally useful for single-artifact outputs.", + }, + FieldHelp: map[string]markers.DetailedHelp{}, + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/doc.go new file mode 100644 index 000000000..a80065ec7 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/doc.go @@ -0,0 +1,60 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package loader defines helpers for loading packages from sources. It wraps +// go/packages, allow incremental loading of source code and manual control +// over which packages get type-checked. This allows for faster loading in +// cases where you don't actually care about certain imports. +// +// Because it uses go/packages, it's modules-aware, and works in both modules- +// and non-modules environments. +// +// Loading +// +// The main entrypoint for loading is LoadRoots, which traverse the package +// graph starting at the given patterns (file, package, path, or ...-wildcard, +// as one might pass to go list). Packages beyond the roots can be accessed +// via the Imports() method. Packages are initially loaded with export data +// paths, filenames, and imports. +// +// Packages are suitable for comparison, as each unique package only ever has +// one *Package object returned. +// +// Syntax and TypeChecking +// +// ASTs and type-checking information can be loaded with NeedSyntax and +// NeedTypesInfo, respectively. Both are idempotent -- repeated calls will +// simply re-use the cached contents. 
+// Note that NeedTypesInfo will *only* type-check
+// the current package -- if you want to type-check imports as well,
+// you'll need to type-check them first.
+//
+// Reference Pruning and Recursive Checking
+//
+// In order to type-check using only the packages you care about, you can use a
+// TypeChecker. TypeChecker will visit each top-level type declaration,
+// collect (optionally filtered) references, and type-check referenced
+// packages.
+//
+// Errors
+//
+// Errors can be added to each package. Use ErrFromNode to create an error
+// from an AST node. Errors can then be printed (complete with file and
+// position information) using PrintErrors, optionally filtered by error type.
+// It's generally a good idea to filter out TypeErrors when doing incomplete
+// type-checking with TypeChecker. You can use MaybeErrList to return multiple
+// errors if you need to return an error instead of adding it to a package.
+// AddError will later unroll it into individual errors.
+package loader
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/errors.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/errors.go
new file mode 100644
index 000000000..aae8ef39f
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/errors.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+)
+
+// PositionedError represents some error with an associated position.
+// The position is tied to some external token.FileSet.
+type PositionedError struct {
+	Pos token.Pos
+	error
+}
+
+// ErrFromNode returns the given error, with additional information
+// attaching it to the given AST node. It will automatically map
+// over error lists.
+func ErrFromNode(err error, node ast.Node) error {
+	if asList, isList := err.(ErrList); isList {
+		resList := make(ErrList, len(asList))
+		for i, baseErr := range asList {
+			resList[i] = ErrFromNode(baseErr, node)
+		}
+		return resList
+	}
+	return PositionedError{
+		Pos:   node.Pos(),
+		error: err,
+	}
+}
+
+// MaybeErrList constructs an ErrList if the given list of
+// errors has any errors, otherwise returning nil.
+func MaybeErrList(errs []error) error {
+	if len(errs) == 0 {
+		return nil
+	}
+	return ErrList(errs)
+}
+
+// ErrList is a list of errors aggregated together into a single error.
+type ErrList []error
+
+func (l ErrList) Error() string {
+	return fmt.Sprintf("%v", []error(l))
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go
new file mode 100644
index 000000000..cb34baf23
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/loader.go
@@ -0,0 +1,360 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+	"go/ast"
+	"go/parser"
+	"go/scanner"
+	"go/token"
+	"go/types"
+	"io/ioutil"
+	"os"
+	"sync"
+
+	"golang.org/x/tools/go/packages"
+)
+
+// Much of this is strongly inspired by the contents of go/packages,
+// except that it allows for lazy loading of syntax and type-checking
+// information to speed up cases where full traversal isn't needed.
+
+// PrintErrors prints errors associated with all packages
+// in the given package graph, starting at the given root
+// packages and traversing through all imports. It will skip
+// any errors of the kinds specified in filterKinds. It will
+// return true if any errors were printed.
+func PrintErrors(pkgs []*Package, filterKinds ...packages.ErrorKind) bool {
+	pkgsRaw := make([]*packages.Package, len(pkgs))
+	for i, pkg := range pkgs {
+		pkgsRaw[i] = pkg.Package
+	}
+	toSkip := make(map[packages.ErrorKind]struct{})
+	for _, errKind := range filterKinds {
+		toSkip[errKind] = struct{}{}
+	}
+	hadErrors := false
+	packages.Visit(pkgsRaw, nil, func(pkgRaw *packages.Package) {
+		for _, err := range pkgRaw.Errors {
+			if _, skip := toSkip[err.Kind]; skip {
+				continue
+			}
+			hadErrors = true
+			fmt.Fprintln(os.Stderr, err)
+		}
+	})
+	return hadErrors
+}
+
+// Package is a single, unique Go package that can be
+// lazily parsed and type-checked. Packages should not
+// be constructed directly -- instead, use LoadRoots.
+// For a given call to LoadRoots, only a single instance
+// of each package exists, and thus they may be used as keys
+// and for comparison.
+type Package struct {
+	*packages.Package
+
+	imports map[string]*Package
+
+	loader *loader
+	sync.Mutex
+}
+
+// Imports returns the imports for the given package, indexed by
+// package path (*not* name in any particular file).
+func (p *Package) Imports() map[string]*Package {
+	if p.imports == nil {
+		p.imports = p.loader.packagesFor(p.Package.Imports)
+	}
+
+	return p.imports
+}
+
+// NeedTypesInfo indicates that type-checking information is needed for this package.
+// Actual type-checking information can be accessed via the Types and TypesInfo fields.
+func (p *Package) NeedTypesInfo() {
+	if p.TypesInfo != nil {
+		return
+	}
+	p.NeedSyntax()
+	p.loader.typeCheck(p)
+}
+
+// NeedSyntax indicates that a parsed AST is needed for this package.
+// Actual ASTs can be accessed via the Syntax field.
+func (p *Package) NeedSyntax() {
+	if p.Syntax != nil {
+		return
+	}
+	out := make([]*ast.File, len(p.CompiledGoFiles))
+	var wg sync.WaitGroup
+	wg.Add(len(p.CompiledGoFiles))
+	for i, filename := range p.CompiledGoFiles {
+		go func(i int, filename string) {
+			defer wg.Done()
+			src, err := ioutil.ReadFile(filename)
+			if err != nil {
+				p.AddError(err)
+				return
+			}
+			out[i], err = p.loader.parseFile(filename, src)
+			if err != nil {
+				p.AddError(err)
+				return
+			}
+		}(i, filename)
+	}
+	wg.Wait()
+	for _, file := range out {
+		if file == nil {
+			return
+		}
+	}
+	p.Syntax = out
+}
+
+// AddError adds an error to the errors associated with the given package.
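+//
+// Errors wrapped with ErrFromNode carry position information, e.g. (the node
+// variable is illustrative):
+//
+//	pkg.AddError(ErrFromNode(fmt.Errorf("invalid marker"), node))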
+func (p *Package) AddError(err error) { + switch typedErr := err.(type) { + case *os.PathError: + // file-reading errors + p.Errors = append(p.Errors, packages.Error{ + Pos: typedErr.Path + ":1", + Msg: typedErr.Err.Error(), + Kind: packages.ParseError, + }) + case scanner.ErrorList: + // parsing/scanning errors + for _, subErr := range typedErr { + p.Errors = append(p.Errors, packages.Error{ + Pos: subErr.Pos.String(), + Msg: subErr.Msg, + Kind: packages.ParseError, + }) + } + case types.Error: + // type-checking errors + p.Errors = append(p.Errors, packages.Error{ + Pos: typedErr.Fset.Position(typedErr.Pos).String(), + Msg: typedErr.Msg, + Kind: packages.TypeError, + }) + case ErrList: + for _, subErr := range typedErr { + p.AddError(subErr) + } + case PositionedError: + p.Errors = append(p.Errors, packages.Error{ + Pos: p.loader.cfg.Fset.Position(typedErr.Pos).String(), + Msg: typedErr.Error(), + Kind: packages.UnknownError, + }) + default: + // should only happen for external errors, like ref checking + p.Errors = append(p.Errors, packages.Error{ + Pos: p.ID + ":-", + Msg: err.Error(), + Kind: packages.UnknownError, + }) + } +} + +// loader loads packages and their imports. Loaded packages will have +// type size, imports, and exports file information populated. Additional +// information, like ASTs and type-checking information, can be accessed +// via methods on individual packages. +type loader struct { + // Roots are the loaded "root" packages in the package graph loaded via + // LoadRoots. + Roots []*Package + + // cfg contains the package loading config (initialized on demand) + cfg *packages.Config + // packages contains the cache of Packages indexed by the underlying + // package.Package, so that we don't ever produce two Packages with + // the same underlying packages.Package. + packages map[*packages.Package]*Package + packagesMu sync.Mutex +} + +// packageFor returns a wrapped Package for the given packages.Package, +// ensuring that there's a one-to-one mapping between the two. +// It's *not* threadsafe -- use packagesFor for that. +func (l *loader) packageFor(pkgRaw *packages.Package) *Package { + if l.packages[pkgRaw] == nil { + l.packages[pkgRaw] = &Package{ + Package: pkgRaw, + loader: l, + } + } + return l.packages[pkgRaw] +} + +// packagesFor returns a map of Package objects for each packages.Package in the input +// map, ensuring that there's a one-to-one mapping between package.Package and Package +// (as per packageFor). +func (l *loader) packagesFor(pkgsRaw map[string]*packages.Package) map[string]*Package { + l.packagesMu.Lock() + defer l.packagesMu.Unlock() + + out := make(map[string]*Package, len(pkgsRaw)) + for name, rawPkg := range pkgsRaw { + out[name] = l.packageFor(rawPkg) + } + return out +} + +// typeCheck type-checks the given package. +func (l *loader) typeCheck(pkg *Package) { + // don't conflict with typeCheckFromExportData + + pkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + + pkg.Fset = l.cfg.Fset + pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name) + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. 
+		importedPkg := pkg.Imports()[path]
+		if importedPkg == nil {
+			return nil, fmt.Errorf("package %q possibly creates an import loop", path)
+		}
+
+		// it's possible to have a call to check in parallel to a call to this
+		// if one package in the package graph gets its dependency filtered out,
+		// but another doesn't (so one wants a "dummy" package here, and another
+		// wants the full check).
+		//
+		// Thus, we need to lock here (at least for the time being) to avoid
+		// races between the above write to `pkg.Types` and this checking of
+		// importedPkg.Types.
+		importedPkg.Lock()
+		defer importedPkg.Unlock()
+
+		if importedPkg.Types != nil && importedPkg.Types.Complete() {
+			return importedPkg.Types, nil
+		}
+
+		// if we haven't already loaded typecheck data, we don't care about this package's types
+		return types.NewPackage(importedPkg.PkgPath, importedPkg.Name), nil
+	})
+
+	var errs []error
+
+	// type-check
+	checkConfig := &types.Config{
+		Importer: importer,
+
+		IgnoreFuncBodies: true, // we only need decl-level info
+
+		Error: func(err error) {
+			errs = append(errs, err)
+		},
+
+		Sizes: pkg.TypesSizes,
+	}
+	if err := types.NewChecker(checkConfig, l.cfg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax); err != nil {
+		errs = append(errs, err)
+	}
+
+	// make sure that if a given sub-import is ill-typed, we mark this package as ill-typed as well.
+	illTyped := len(errs) > 0
+	if !illTyped {
+		for _, importedPkg := range pkg.Imports() {
+			if importedPkg.IllTyped {
+				illTyped = true
+				break
+			}
+		}
+	}
+	pkg.IllTyped = illTyped
+
+	// publish errors to the package error list.
+	for _, err := range errs {
+		pkg.AddError(err)
+	}
+}
+
+// parseFile parses the given file, including comments.
+func (l *loader) parseFile(filename string, src []byte) (*ast.File, error) {
+	// skip function bodies
+	file, err := parser.ParseFile(l.cfg.Fset, filename, src, parser.AllErrors|parser.ParseComments)
+	if err != nil {
+		return nil, err
+	}
+
+	return file, nil
+}
+
+// LoadRoots loads the given "root" packages by path, transitively loading
+// all of their imports as well.
+//
+// Loaded packages will have type size, imports, and exports file information
+// populated. Additional information, like ASTs and type-checking information,
+// can be accessed via methods on individual packages.
+func LoadRoots(roots ...string) ([]*Package, error) {
+	return LoadRootsWithConfig(&packages.Config{}, roots...)
+}
+
+// LoadRootsWithConfig functions like LoadRoots, except that it allows passing
+// a custom loading config. The config will be modified to suit the needs of
+// the loader.
+//
+// This is generally only useful for use in testing when you need to modify
+// loading settings to load from a fake location.
+func LoadRootsWithConfig(cfg *packages.Config, roots ...string) ([]*Package, error) {
+	l := &loader{
+		cfg:      cfg,
+		packages: make(map[*packages.Package]*Package),
+	}
+	l.cfg.Mode |= packages.LoadImports | packages.NeedTypesSizes
+	if l.cfg.Fset == nil {
+		l.cfg.Fset = token.NewFileSet()
+	}
+	// put our build flags first so that callers can override them
+	l.cfg.BuildFlags = append([]string{"-tags", "ignore_autogenerated"}, l.cfg.BuildFlags...)
+
+	rawPkgs, err := packages.Load(l.cfg, roots...)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, rawPkg := range rawPkgs {
+		l.Roots = append(l.Roots, l.packageFor(rawPkg))
+	}
+
+	return l.Roots, nil
+}
+
+// importerFunc is an implementation of the single-method
+// types.Importer interface based on a function value.
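+//
+// A hypothetical sketch of the adapter pattern this enables:
+//
+//	imp := importerFunc(func(path string) (*types.Package, error) {
+//		return nil, fmt.Errorf("imports disabled, rejecting %q", path)
+//	})
+//	_, err := imp.Import("fmt") // invokes the function above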
+type importerFunc func(path string) (*types.Package, error)
+
+func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/paths.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/paths.go
new file mode 100644
index 000000000..3b783e168
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/paths.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"strings"
+)
+
+// NonVendorPath returns a package path that does not include anything before the
+// last vendor directory. This is useful when using vendor directories and
+// go/types.Package.Path(), which returns the full path including vendor.
+//
+// If you're using this, make sure you really need it -- it's better to index by
+// the actual Package object when you can.
+func NonVendorPath(rawPath string) string {
+	parts := strings.Split(rawPath, "/vendor/")
+	return parts[len(parts)-1]
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go
new file mode 100644
index 000000000..9e2989cf5
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/refs.go
@@ -0,0 +1,254 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"fmt"
+
+	"go/ast"
+	"strconv"
+	"sync"
+)
+
+// NB(directxman12): most of this is done by the typechecker,
+// but it's a bit slow/heavyweight for what we want -- we want
+// to resolve external imports *only* if we actually need them.
+
+// Basically, what we do is:
+// 1. Map imports to names
+// 2. Find all explicit external references (`name.type`)
+// 3. Find all referenced packages by merging explicit references and dot imports
+// 4. Only type-check those packages
+// 5. Ignore type-checking errors from the missing packages, because we won't ever
+//    touch unloaded types (they're probably used in ignored fields/types, variables, or functions)
+//    (done using PrintErrors with an ignore argument from the caller).
+// 6. Notice any actual type-checking errors via invalid types
+
+// importsMap saves import aliases, mapping them to underlying packages.
+type importsMap struct {
+	// dotImports maps package IDs to packages for any packages that have
+	// been imported as `.`
+	dotImports map[string]*Package
+	// byName maps package aliases or names to the underlying package.
+	byName map[string]*Package
+}
+
+// mapImports maps imports from the names they use in the given file to the underlying package,
+// using a map of package import paths to packages (generally from Package.Imports()).
+func mapImports(file *ast.File, importedPkgs map[string]*Package) (*importsMap, error) {
+	m := &importsMap{
+		dotImports: make(map[string]*Package),
+		byName:     make(map[string]*Package),
+	}
+	for _, importSpec := range file.Imports {
+		path, err := strconv.Unquote(importSpec.Path.Value)
+		if err != nil {
+			return nil, ErrFromNode(err, importSpec.Path)
+		}
+		importedPkg := importedPkgs[path]
+		if importedPkg == nil {
+			return nil, ErrFromNode(fmt.Errorf("no such package located"), importSpec.Path)
+		}
+		if importSpec.Name == nil {
+			m.byName[importedPkg.Name] = importedPkg
+			continue
+		}
+		if importSpec.Name.Name == "." {
+			m.dotImports[importedPkg.ID] = importedPkg
+			continue
+		}
+		m.byName[importSpec.Name.Name] = importedPkg
+	}
+
+	return m, nil
+}
+
+// referenceSet finds references to external packages' types in the given file,
+// without otherwise calling into the type-checker. When checking structs,
+// it only checks fields with JSON tags.
+type referenceSet struct {
+	file    *ast.File
+	imports *importsMap
+	pkg     *Package
+
+	externalRefs map[*Package]struct{}
+}
+
+func (r *referenceSet) init() {
+	if r.externalRefs == nil {
+		r.externalRefs = make(map[*Package]struct{})
+	}
+}
+
+// NodeFilter filters nodes, accepting them for reference collection
+// when true is returned and rejecting them when false is returned.
+type NodeFilter func(ast.Node) bool
+
+// collectReferences saves all references to external types in the given info.
+func (r *referenceSet) collectReferences(rawType ast.Expr, filterNode NodeFilter) {
+	r.init()
+	col := &referenceCollector{
+		refs:       r,
+		filterNode: filterNode,
+	}
+	ast.Walk(col, rawType)
+}
+
+// external saves an external reference to the given named package.
+func (r *referenceSet) external(pkgName string) {
+	pkg := r.imports.byName[pkgName]
+	if pkg == nil {
+		r.pkg.AddError(fmt.Errorf("use of unimported package %q", pkgName))
+		return
+	}
+	r.externalRefs[pkg] = struct{}{}
+}
+
+// referenceCollector visits nodes in an AST, adding external references to a
+// referenceSet.
+type referenceCollector struct {
+	refs       *referenceSet
+	filterNode NodeFilter
+}
+
+func (c *referenceCollector) Visit(node ast.Node) ast.Visitor {
+	if !c.filterNode(node) {
+		return nil
+	}
+	switch typedNode := node.(type) {
+	case *ast.Ident:
+		// local reference or dot-import, ignore
+		return nil
+	case *ast.SelectorExpr:
+		pkgName := typedNode.X.(*ast.Ident).Name
+		c.refs.external(pkgName)
+		return nil
+	default:
+		return c
+	}
+}
+
+// allReferencedPackages finds all directly referenced packages in the given package.
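+//
+// For example (hypothetical call), an accept-all filter collects the packages
+// referenced from every type:
+//
+//	refed := allReferencedPackages(pkg, func(ast.Node) bool { return true })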
+func allReferencedPackages(pkg *Package, filterNodes NodeFilter) []*Package {
+	pkg.NeedSyntax()
+	refsByFile := make(map[*ast.File]*referenceSet)
+	for _, file := range pkg.Syntax {
+		imports, err := mapImports(file, pkg.Imports())
+		if err != nil {
+			pkg.AddError(err)
+			return nil
+		}
+		refs := &referenceSet{
+			file:    file,
+			imports: imports,
+			pkg:     pkg,
+		}
+		refsByFile[file] = refs
+	}
+
+	EachType(pkg, func(file *ast.File, decl *ast.GenDecl, spec *ast.TypeSpec) {
+		refs := refsByFile[file]
+		refs.collectReferences(spec.Type, filterNodes)
+	})
+
+	allPackages := make(map[*Package]struct{})
+	for _, refs := range refsByFile {
+		for _, pkg := range refs.imports.dotImports {
+			allPackages[pkg] = struct{}{}
+		}
+		for ref := range refs.externalRefs {
+			allPackages[ref] = struct{}{}
+		}
+	}
+
+	res := make([]*Package, 0, len(allPackages))
+	for pkg := range allPackages {
+		res = append(res, pkg)
+	}
+	return res
+}
+
+// TypeChecker performs type-checking on a limited subset of packages by
+// checking each package's types' externally-referenced types, and only
+// type-checking those packages.
+type TypeChecker struct {
+	checkedPackages map[*Package]struct{}
+	filterNodes     NodeFilter
+	sync.Mutex
+}
+
+// Check type-checks the given package and all packages referenced
+// by types that pass through (have true returned by) filterNodes.
+func (c *TypeChecker) Check(root *Package, filterNodes NodeFilter) {
+	c.init()
+
+	if filterNodes == nil {
+		filterNodes = c.filterNodes
+	}
+
+	// use a sub-checker with the appropriate settings
+	(&TypeChecker{
+		filterNodes:     filterNodes,
+		checkedPackages: c.checkedPackages,
+	}).check(root)
+}
+
+func (c *TypeChecker) init() {
+	if c.checkedPackages == nil {
+		c.checkedPackages = make(map[*Package]struct{})
+	}
+	if c.filterNodes == nil {
+		// check every type by default
+		c.filterNodes = func(_ ast.Node) bool {
+			return true
+		}
+	}
+}
+
+// check recursively type-checks the given package, only loading packages that
+// are actually referenced by our types (it's the actual implementation of Check,
+// without initialization).
+func (c *TypeChecker) check(root *Package) {
+	root.Lock()
+	defer root.Unlock()
+
+	c.Lock()
+	_, ok := c.checkedPackages[root]
+	c.Unlock()
+	if ok {
+		return
+	}
+
+	refedPackages := allReferencedPackages(root, c.filterNodes)
+
+	// first, resolve imports for all leaf packages...
+	var wg sync.WaitGroup
+	for _, pkg := range refedPackages {
+		wg.Add(1)
+		go func(pkg *Package) {
+			defer wg.Done()
+			c.check(pkg)
+		}(pkg)
+	}
+	wg.Wait()
+
+	// ...then, we can safely type-check ourself
+	root.NeedTypesInfo()
+
+	c.Lock()
+	defer c.Unlock()
+	c.checkedPackages[root] = struct{}{}
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/loader/visit.go b/vendor/sigs.k8s.io/controller-tools/pkg/loader/visit.go
new file mode 100644
index 000000000..b5646fde1
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/loader/visit.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package loader
+
+import (
+	"go/ast"
+	"reflect"
+	"strconv"
+)
+
+// TypeCallback is a callback called for each raw AST (gendecl, typespec) combo.
+type TypeCallback func(file *ast.File, decl *ast.GenDecl, spec *ast.TypeSpec)
+
+// EachType calls the given callback for each (gendecl, typespec) combo in the
+// given package. Generally, using markers.EachType is better when working
+// with marker data, and has a more convenient representation.
+func EachType(pkg *Package, cb TypeCallback) {
+	visitor := &typeVisitor{
+		callback: cb,
+	}
+	pkg.NeedSyntax()
+	for _, file := range pkg.Syntax {
+		visitor.file = file
+		ast.Walk(visitor, file)
+	}
+}
+
+// typeVisitor visits all TypeSpecs, calling the given callback for each.
+type typeVisitor struct {
+	callback TypeCallback
+	decl     *ast.GenDecl
+	file     *ast.File
+}
+
+// Visit visits all TypeSpecs.
+func (v *typeVisitor) Visit(node ast.Node) ast.Visitor {
+	if node == nil {
+		v.decl = nil
+		return v
+	}
+
+	switch typedNode := node.(type) {
+	case *ast.File:
+		v.file = typedNode
+		return v
+	case *ast.GenDecl:
+		v.decl = typedNode
+		return v
+	case *ast.TypeSpec:
+		v.callback(v.file, v.decl, typedNode)
+		return nil // don't recurse
+	default:
+		return nil
+	}
+}
+
+// ParseAstTag parses the given raw tag literal into a reflect.StructTag.
+func ParseAstTag(tag *ast.BasicLit) reflect.StructTag {
+	if tag == nil {
+		return reflect.StructTag("")
+	}
+	tagStr, err := strconv.Unquote(tag.Value)
+	if err != nil {
+		return reflect.StructTag("")
+	}
+	return reflect.StructTag(tagStr)
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go
new file mode 100644
index 000000000..c5ea2345f
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/collect.go
@@ -0,0 +1,422 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+	"go/ast"
+	"go/token"
+	"strings"
+	"sync"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// Collector collects and parses marker comments defined in the registry
+// from package source code.
+type Collector struct {
+	*Registry
+
+	byPackage map[string]map[ast.Node]MarkerValues
+	mu        sync.Mutex
+}
+
+// MarkerValues are all the values for some set of markers.
+type MarkerValues map[string][]interface{}
+
+// Get fetches the first value for the given marker, returning
+// nil if no values are available.
+func (v MarkerValues) Get(name string) interface{} {
+	vals := v[name]
+	if len(vals) == 0 {
+		return nil
+	}
+	return vals[0]
+}
+
+func (c *Collector) init() {
+	if c.Registry == nil {
+		c.Registry = &Registry{}
+	}
+	if c.byPackage == nil {
+		c.byPackage = make(map[string]map[ast.Node]MarkerValues)
+	}
+}
+
+// MarkersInPackage computes the marker values by node for the given package. Results
+// are cached by package ID, so this is safe to call repeatedly from different functions.
+// Each file in the package is treated as a distinct node.
+//
+// We consider a marker to be associated with a given AST node if any of the following is true:
+//
+// - it's in the Godoc for that AST node
+//
+// - it's in the closest non-godoc comment group above that node,
+//   *and* that node is a type or field node, *and* [it's either
+//   registered as type-level *or* it's not registered as being
+//   package-level]
+//
+// - it's not in the Godoc of a node, doesn't meet the above criteria, and
+//   isn't in a struct definition (in which case it's package-level)
+func (c *Collector) MarkersInPackage(pkg *loader.Package) (map[ast.Node]MarkerValues, error) {
+	c.mu.Lock()
+	c.init()
+	if markers, exist := c.byPackage[pkg.ID]; exist {
+		c.mu.Unlock()
+		return markers, nil
+	}
+	// unlock early; it's ok to do a bit of extra work rather than hold the lock while we're working
+	c.mu.Unlock()
+
+	pkg.NeedSyntax()
+	nodeMarkersRaw := c.associatePkgMarkers(pkg)
+	markers, err := c.parseMarkersInPackage(nodeMarkersRaw)
+	if err != nil {
+		return nil, err
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.byPackage[pkg.ID] = markers
+
+	return markers, nil
+}
+
+// parseMarkersInPackage parses the given raw marker comments into output values using the registry.
+func (c *Collector) parseMarkersInPackage(nodeMarkersRaw map[ast.Node][]markerComment) (map[ast.Node]MarkerValues, error) {
+	var errors []error
+	nodeMarkerValues := make(map[ast.Node]MarkerValues)
+	for node, markersRaw := range nodeMarkersRaw {
+		var target TargetType
+		switch node.(type) {
+		case *ast.File:
+			target = DescribesPackage
+		case *ast.Field:
+			target = DescribesField
+		default:
+			target = DescribesType
+		}
+		markerVals := make(map[string][]interface{})
+		for _, markerRaw := range markersRaw {
+			markerText := markerRaw.Text()
+			def := c.Registry.Lookup(markerText, target)
+			if def == nil {
+				continue
+			}
+			val, err := def.Parse(markerText)
+			if err != nil {
+				errors = append(errors, loader.ErrFromNode(err, markerRaw))
+				continue
+			}
+			markerVals[def.Name] = append(markerVals[def.Name], val)
+		}
+		nodeMarkerValues[node] = markerVals
+	}
+
+	return nodeMarkerValues, loader.MaybeErrList(errors)
+}
+
+// associatePkgMarkers associates markers with AST nodes in the given package.
+func (c *Collector) associatePkgMarkers(pkg *loader.Package) map[ast.Node][]markerComment {
+	nodeMarkers := make(map[ast.Node][]markerComment)
+	for _, file := range pkg.Syntax {
+		fileNodeMarkers := c.associateFileMarkers(file)
+		for node, markers := range fileNodeMarkers {
+			nodeMarkers[node] = append(nodeMarkers[node], markers...)
+		}
+	}
+
+	return nodeMarkers
+}
+
+// associateFileMarkers associates markers with AST nodes in the given file.
+func (c *Collector) associateFileMarkers(file *ast.File) map[ast.Node][]markerComment {
+	// grab all the raw marker comments by node
+	visitor := markerSubVisitor{
+		collectPackageLevel: true,
+		markerVisitor: &markerVisitor{
+			nodeMarkers: make(map[ast.Node][]markerComment),
+			allComments: file.Comments,
+		},
+	}
+	ast.Walk(visitor, file)
+
+	// grab the last package-level comments at the end of the file (if any)
+	lastFileMarkers := visitor.markersBetween(false, visitor.commentInd, len(visitor.allComments))
+	visitor.pkgMarkers = append(visitor.pkgMarkers, lastFileMarkers...)
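+	// (comment groups after the file's final node can only be package-level markers)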
+
+	// figure out if any type-level markers are actually package-level markers
+	for node, markers := range visitor.nodeMarkers {
+		_, isType := node.(*ast.TypeSpec)
+		if !isType {
+			continue
+		}
+		endOfMarkers := 0
+		for _, marker := range markers {
+			if marker.fromGodoc {
+				// markers from godoc are never package level
+				markers[endOfMarkers] = marker
+				endOfMarkers++
+				continue
+			}
+			markerText := marker.Text()
+			typeDef := c.Registry.Lookup(markerText, DescribesType)
+			if typeDef != nil {
+				// prefer assuming type-level markers
+				markers[endOfMarkers] = marker
+				endOfMarkers++
+				continue
+			}
+			def := c.Registry.Lookup(markerText, DescribesPackage)
+			if def == nil {
+				// assume type-level unless proven otherwise
+				markers[endOfMarkers] = marker
+				endOfMarkers++
+				continue
+			}
+			// it's package-level, since a package-level definition exists
+			visitor.pkgMarkers = append(visitor.pkgMarkers, marker)
+		}
+		visitor.nodeMarkers[node] = markers[:endOfMarkers] // re-set after trimming the package markers
+	}
+	visitor.nodeMarkers[file] = visitor.pkgMarkers
+
+	return visitor.nodeMarkers
+}
+
+// markerComment is an AST comment that contains a marker.
+// It may or may not be from a Godoc comment, which affects
+// marker re-association (from type-level to package-level).
+type markerComment struct {
+	*ast.Comment
+	fromGodoc bool
+}
+
+// Text returns the text of the marker, stripped of the comment
+// marker and leading spaces, as should be passed to Registry.Lookup
+// and Registry.Parse.
+func (c markerComment) Text() string {
+	return strings.TrimSpace(c.Comment.Text[2:])
+}
+
+// markerVisitor visits AST nodes, recording markers associated with each node.
+type markerVisitor struct {
+	allComments []*ast.CommentGroup
+	commentInd  int
+
+	declComments         []markerComment
+	lastLineCommentGroup *ast.CommentGroup
+
+	pkgMarkers  []markerComment
+	nodeMarkers map[ast.Node][]markerComment
+}
+
+// isMarkerComment checks that the given comment is a single-line (`//`)
+// comment and that its first non-space content is `+`.
+func isMarkerComment(comment string) bool {
+	if comment[0:2] != "//" {
+		return false
+	}
+	stripped := strings.TrimSpace(comment[2:])
+	if len(stripped) < 1 || stripped[0] != '+' {
+		return false
+	}
+	return true
+}
+
+// markersBetween grabs the markers between the given indices in the list of all comments.
+func (v *markerVisitor) markersBetween(fromGodoc bool, start, end int) []markerComment {
+	if start < 0 || end < 0 {
+		return nil
+	}
+	var res []markerComment
+	for i := start; i < end; i++ {
+		commentGroup := v.allComments[i]
+		for _, comment := range commentGroup.List {
+			if !isMarkerComment(comment.Text) {
+				continue
+			}
+			res = append(res, markerComment{Comment: comment, fromGodoc: fromGodoc})
+		}
+	}
+	return res
+}
+
+type markerSubVisitor struct {
+	*markerVisitor
+	node                ast.Node
+	collectPackageLevel bool
+}
+
+// Visit collects markers for each node in the AST, optionally
+// collecting unassociated markers as package-level.
+func (v markerSubVisitor) Visit(node ast.Node) ast.Visitor {
+	if node == nil {
+		// end of the node, so we might need to advance comments beyond the end
+		// of the block if we don't want to collect package-level markers in
+		// this block.
+
+		if !v.collectPackageLevel {
+			if v.commentInd < len(v.allComments) {
+				lastCommentInd := v.commentInd
+				nextGroup := v.allComments[lastCommentInd]
+				for nextGroup.Pos() < v.node.End() {
+					lastCommentInd++
+					if lastCommentInd >= len(v.allComments) {
+						// after the increment so our decrement below still makes sense
+						break
+					}
+					nextGroup = v.allComments[lastCommentInd]
+				}
+				v.commentInd = lastCommentInd
+			}
+		}
+
+		return nil
+	}
+
+	// skip comments on the same line as the previous node,
+	// making sure to double-check for the case where we've gone past the end of the comments
+	// but still have to finish up typespec-gendecl association (see below).
+	if v.lastLineCommentGroup != nil && v.commentInd < len(v.allComments) && v.lastLineCommentGroup.Pos() == v.allComments[v.commentInd].Pos() {
+		v.commentInd++
+	}
+
+	// stop visiting if there are no more comments in the file
+	// NB(directxman12): we can't just stop immediately, because we
+	// still need to check if there are typespecs associated with gendecls.
+	var markerCommentBlock []markerComment
+	var docCommentBlock []markerComment
+	lastCommentInd := v.commentInd
+	if v.commentInd < len(v.allComments) {
+		// figure out the first comment after the node in question...
+		nextGroup := v.allComments[lastCommentInd]
+		for nextGroup.Pos() < node.Pos() {
+			lastCommentInd++
+			if lastCommentInd >= len(v.allComments) {
+				// after the increment so our decrement below still makes sense
+				break
+			}
+			nextGroup = v.allComments[lastCommentInd]
+		}
+		lastCommentInd-- // ...then decrement to get the last comment before the node in question
+
+		// figure out the godoc comment so we can deal with it separately
+		var docGroup *ast.CommentGroup
+		docGroup, v.lastLineCommentGroup = associatedCommentsFor(node)
+
+		// find the last comment group that's not godoc
+		markerCommentInd := lastCommentInd
+		if docGroup != nil && v.allComments[markerCommentInd].Pos() == docGroup.Pos() {
+			markerCommentInd--
+		}
+
+		// check if we have freestanding package markers,
+		// and find the markers in our "closest non-godoc" comment block,
+		// plus our godoc comment block
+		if markerCommentInd >= v.commentInd {
+			if v.collectPackageLevel {
+				// assume anything between the comment ind and the marker ind (not including it)
+				// are package-level
+				v.pkgMarkers = append(v.pkgMarkers, v.markersBetween(false, v.commentInd, markerCommentInd)...)
+			}
+			markerCommentBlock = v.markersBetween(false, markerCommentInd, markerCommentInd+1)
+			docCommentBlock = v.markersBetween(true, markerCommentInd+1, lastCommentInd+1)
+		} else {
+			docCommentBlock = v.markersBetween(true, markerCommentInd+1, lastCommentInd+1)
+		}
+	}
+
+	resVisitor := markerSubVisitor{
+		collectPackageLevel: false, // don't collect package level by default
+		markerVisitor:       v.markerVisitor,
+		node:                node,
+	}
+
+	// associate those markers with a node
+	switch typedNode := node.(type) {
+	case *ast.GenDecl:
+		// save the comments associated with the gen-decl if it's a single-line type decl
+		if typedNode.Lparen != token.NoPos || typedNode.Tok != token.TYPE {
+			// not a single-line type spec, treat them as free comments
+			v.pkgMarkers = append(v.pkgMarkers, markerCommentBlock...)
+			break
+		}
+		// save these, we'll need them when we encounter the actual type spec
+		v.declComments = append(v.declComments, markerCommentBlock...)
+		v.declComments = append(v.declComments, docCommentBlock...)
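+		// (the *ast.TypeSpec case below consumes declComments and then resets it)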
+	case *ast.TypeSpec:
+		// add in comments attributed to the gen-decl, if any,
+		// as well as comments associated with the actual type
+		v.nodeMarkers[node] = append(v.nodeMarkers[node], v.declComments...)
+		v.nodeMarkers[node] = append(v.nodeMarkers[node], markerCommentBlock...)
+		v.nodeMarkers[node] = append(v.nodeMarkers[node], docCommentBlock...)
+
+		v.declComments = nil
+		v.collectPackageLevel = false // don't collect package-level inside type structs
+	case *ast.Field:
+		v.nodeMarkers[node] = append(v.nodeMarkers[node], markerCommentBlock...)
+		v.nodeMarkers[node] = append(v.nodeMarkers[node], docCommentBlock...)
+	case *ast.File:
+		v.pkgMarkers = append(v.pkgMarkers, markerCommentBlock...)
+		v.pkgMarkers = append(v.pkgMarkers, docCommentBlock...)
+
+		// collect markers in root file scope
+		resVisitor.collectPackageLevel = true
+	default:
+		// assume markers before anything else are package-level markers,
+		// *but* don't include any markers in godoc
+		if v.collectPackageLevel {
+			v.pkgMarkers = append(v.pkgMarkers, markerCommentBlock...)
+		}
+	}
+
+	// increment the comment ind so that we start at the right place for the next node
+	v.commentInd = lastCommentInd + 1
+
+	return resVisitor
+}
+
+// associatedCommentsFor returns the doc comment group (if relevant and present) and end-of-line comment
+// (again if relevant and present) for the given AST node.
+func associatedCommentsFor(node ast.Node) (docGroup *ast.CommentGroup, lastLineCommentGroup *ast.CommentGroup) {
+	switch typedNode := node.(type) {
+	case *ast.Field:
+		docGroup = typedNode.Doc
+		lastLineCommentGroup = typedNode.Comment
+	case *ast.File:
+		docGroup = typedNode.Doc
+	case *ast.FuncDecl:
+		docGroup = typedNode.Doc
+	case *ast.GenDecl:
+		docGroup = typedNode.Doc
+	case *ast.ImportSpec:
+		docGroup = typedNode.Doc
+		lastLineCommentGroup = typedNode.Comment
+	case *ast.TypeSpec:
+		docGroup = typedNode.Doc
+		lastLineCommentGroup = typedNode.Comment
+	case *ast.ValueSpec:
+		docGroup = typedNode.Doc
+		lastLineCommentGroup = typedNode.Comment
+	default:
+		lastLineCommentGroup = nil
+	}
+
+	return docGroup, lastLineCommentGroup
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/doc.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/doc.go
new file mode 100644
index 000000000..707036de8
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/doc.go
@@ -0,0 +1,113 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package markers contains utilities for defining and parsing "marker
+// comments", also occasionally called tag comments (we use the term marker to
+// avoid confusing them with struct tags). Parsed result (output) values take
+// the form of Go values, much like the "encoding/json" package.
+//
+// Definitions and Parsing
+//
+// Markers are defined as structured Definitions which can be used to
+// consistently parse marker comments. A Definition contains a concrete
+// output type for the marker, which can be a simple type (like string), a
+// struct, or a wrapper type (useful for defining additional methods on marker
+// types).
+//
+// Markers take the general form
+//
+//	+path:to:marker=val
+//
+//	+path:to:marker:arg1=val,arg2=val2
+//
+//	+path:to:marker
+//
+// Arguments may be ints, bools, strings, and slices. Ints and bools take
+// their standard Go forms. Strings may take any of their standard forms, or
+// any sequence of unquoted characters up until a `,` or `;` is encountered.
+// Lists take either of the following forms:
+//
+//	val;val;val
+//
+//	{val, val, val}
+//
+// Note that the first form will not properly parse nested slices, but is
+// generally convenient and is the form used in many existing markers.
+//
+// Each of those argument types maps to the corresponding go type. Pointers
+// mark optional fields (a struct tag, below, may also be used). The empty
+// interface will match any type.
+//
+// Struct fields may optionally be annotated with the `marker` struct tag. The
+// first argument is a name override. If it's left blank (or the tag isn't
+// present), the camelCase version of the name will be used. The only
+// additional argument defined is `optional`, which marks a field as optional
+// without using a pointer.
+//
+// All parsed values are unmarshalled into the output type. If any
+// non-optional fields aren't mentioned, an error will be raised unless
+// `Strict` is set to false.
+//
+// Registries and Lookup
+//
+// Definitions can be added to registries to facilitate lookups. Each
+// definition is marked as either describing a type, struct field, or package
+// (unassociated). The same marker name may be registered multiple times, as
+// long as each describes a different construct (type, field, or package).
+// Definitions can then be looked up by passing unparsed markers.
+//
+// Collection and Extraction
+//
+// Markers can be collected from a loader.Package using a Collector. The
+// Collector will read from a given Registry, collecting comments that look
+// like markers and parsing them if they match some definition on the registry.
+//
+// Markers are considered associated with a particular field or type if they
+// exist in the Godoc, or the closest non-godoc comment. Any other markers not
+// inside some other block (e.g. a struct definition, interface definition,
+// etc.) are considered package level. Markers in a "closest non-godoc comment
+// block" may also be considered package level if registered as such and no
+// identical type-level definition exists.
+//
+// Like loader.Package, Collector's methods are idempotent and will not
+// re-perform work.
+//
+// Traversal
+//
+// The EachType function iterates over each type in a Package, providing
+// conveniently structured type and field information with marker values
+// associated.
+//
+// PackageMarkers can be used to fetch just package-level markers.
+//
+// Help
+//
+// Help can be defined for each marker using the DefinitionHelp struct. It's
+// mostly intended to be generated off of godocs using cmd/helpgen, which takes
+// the first line as summary (removing the type/field name), and considers the
+// rest as details. It looks for the
+//
+//	+controllertools:generateHelp[:category=<string>]
+//
+// marker to start generation.
+//
+// If you can't use godoc-based generation for whatever reasons (e.g.
+// primitive-typed markers), you can use the SimpleHelp and DeprecatedHelp
+// helper functions to generate help structs.
+//
+// Help is then registered into a registry as associated with the actual
+// definition, and can then be later retrieved from the registry.
+package markers
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/help.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/help.go
new file mode 100644
index 000000000..26ca059bc
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/help.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+// You *probably* don't want to write these structs by hand
+// -- use cmd/helpgen if you can write Godoc, and {Simple,Deprecated}Help
+// otherwise.
+
+// DetailedHelp contains brief help, as well as more details.
+// For the "full" help, join the two together.
+type DetailedHelp struct {
+	Summary string
+	Details string
+}
+
+// DefinitionHelp contains overall help for a marker Definition,
+// as well as per-field help.
+type DefinitionHelp struct {
+	// DetailedHelp contains the overall help for the marker.
+	DetailedHelp
+	// Category describes what kind of marker this is.
+	Category string
+	// DeprecatedInFavorOf marks the marker as deprecated.
+	// If non-nil and empty, it's assumed to just mean deprecated permanently.
+	// If non-empty, it's assumed to be the name of a replacement marker.
+	DeprecatedInFavorOf *string
+
+	// NB(directxman12): we make FieldHelp be in terms of the Go struct field
+	// names so that we don't have to know the conversion or processing rules
+	// for struct fields at compile-time for help generation.
+
+	// FieldHelp defines the per-field help for this marker, *in terms of the
+	// Go struct field names*. Use the FieldsHelp method to map this to
+	// marker argument names.
+	FieldHelp map[string]DetailedHelp
+}
+
+// FieldsHelp maps per-field help to the actual marker argument names from the
+// given definition.
+func (d *DefinitionHelp) FieldsHelp(def *Definition) map[string]DetailedHelp {
+	fieldsHelp := make(map[string]DetailedHelp, len(def.FieldNames))
+	for argName, fieldName := range def.FieldNames {
+		fieldsHelp[argName] = d.FieldHelp[fieldName]
+	}
+	return fieldsHelp
+}
+
+// SimpleHelp returns help that just has marker-level summary information
+// (e.g. for use with empty or primitive-typed markers, where Godoc-based
+// generation isn't possible).
+func SimpleHelp(category, summary string) *DefinitionHelp {
+	return &DefinitionHelp{
+		Category:     category,
+		DetailedHelp: DetailedHelp{Summary: summary},
+	}
+}
+
+// DeprecatedHelp returns simple help (a la SimpleHelp), except marked as
+// deprecated in favor of the given marker (or an empty string for just
+// deprecated).
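+//
+// For example (marker name and category are illustrative):
+//
+//	help := DeprecatedHelp("mygroup:newMarker", "mycategory", "does something")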
+func DeprecatedHelp(inFavorOf, category, summary string) *DefinitionHelp {
+	return &DefinitionHelp{
+		Category:            category,
+		DetailedHelp:        DetailedHelp{Summary: summary},
+		DeprecatedInFavorOf: &inFavorOf,
+	}
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go
new file mode 100644
index 000000000..4034c25ab
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/parse.go
@@ -0,0 +1,908 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	sc "text/scanner"
+	"unicode"
+
+	"sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// expect checks that the next token of the scanner is the given token, adding an error
+// to the scanner if not. It returns whether the token was as expected.
+func expect(scanner *sc.Scanner, expected rune, errDesc string) bool {
+	tok := scanner.Scan()
+	if tok != expected {
+		scanner.Error(scanner, fmt.Sprintf("expected %s, got %q", errDesc, scanner.TokenText()))
+		return false
+	}
+	return true
+}
+
+// peekNoSpace is equivalent to scanner.Peek, except that it will consume intervening whitespace.
+func peekNoSpace(scanner *sc.Scanner) rune {
+	hint := scanner.Peek()
+	for ; hint <= rune(' ') && ((1<<uint64(hint))&scanner.Whitespace) != 0; hint = scanner.Peek() {
+		scanner.Next() // skip the whitespace
+	}
+	return hint
+}
+
+// lowerCamelCase converts a PascalCase string to a camelCase string
+// (by lowercasing the first character).
+func lowerCamelCase(in string) string {
+	isFirst := true
+	return strings.Map(func(inRune rune) rune {
+		if isFirst {
+			isFirst = false
+			return unicode.ToLower(inRune)
+		}
+		return inRune
+	}, in)
+}
+
+// RawArguments is a special type that can be used for a marker
+// to receive *all* of the raw, underparsed argument data for a marker.
+// You probably want to use `interface{}` to match any type instead.
+// Use *only* for legacy markers that don't follow Definition's normal
+// parsing logic. It should *not* be used as a field in a marker struct.
+type RawArguments []byte
+
+// ArgumentType is the kind of a marker argument type.
+// It's roughly analogous to a subset of reflect.Kind, with
+// an extra "AnyType" to represent the empty interface.
+type ArgumentType int
+
+const (
+	// InvalidType represents a type that can't be parsed, and should never be used.
+	InvalidType ArgumentType = iota
+	// IntType is an int.
+	IntType
+	// StringType is a string.
+	StringType
+	// BoolType is a bool.
+	BoolType
+	// AnyType is the empty interface, and matches the rest of the content.
+	AnyType
+	// SliceType is any slice constructed of the ArgumentTypes.
+	SliceType
+	// MapType is any map constructed of string keys and ArgumentType values.
+	// Keys are strings, and it's common to see AnyType (non-uniform) values.
+	MapType
+	// RawType represents the raw, underparsed input for a marker.
+	RawType
+)
+
+var (
+	// interfaceType is a pre-computed reflect.Type representing the empty interface.
+	interfaceType = reflect.TypeOf((*interface{})(nil)).Elem()
+	rawArgsType   = reflect.TypeOf((*RawArguments)(nil)).Elem()
+)
+
+// Argument is the type of a marker argument's value, for searching, etc.
+type Argument struct {
+	// Type is the type of this argument. For non-scalar types
+	// (map and slice), further information is specified in ItemType.
+	Type ArgumentType
+	// Optional indicates if this argument is optional.
+	Optional bool
+	// Pointer indicates if this argument was a pointer (this is really only
+	// needed for deserialization, and should always imply optional).
+	Pointer bool
+
+	// ItemType is the type of the slice item for slices, and the value type
+	// for maps.
+	ItemType *Argument
+}
+
+// typeString contains the internals of TypeString.
+func (a Argument) typeString(out *strings.Builder) {
+	if a.Pointer {
+		out.WriteString("*")
+	}
+
+	switch a.Type {
+	case InvalidType:
+		out.WriteString("<invalid>")
+	case IntType:
+		out.WriteString("int")
+	case StringType:
+		out.WriteString("string")
+	case BoolType:
+		out.WriteString("bool")
+	case AnyType:
+		out.WriteString("<any>")
+	case SliceType:
+		out.WriteString("[]")
+		// arguments can't be non-pointer optional, so just call into typeString again.
+		a.ItemType.typeString(out)
+	case MapType:
+		out.WriteString("map[string]")
+		a.ItemType.typeString(out)
+	case RawType:
+		out.WriteString("<raw>")
+	}
+}
+
+// TypeString returns a string roughly equivalent
+// (but not identical) to the underlying Go type that
+// this argument would parse to. It's mainly useful
+// for user-friendly formatting of this argument (e.g.
+// help strings).
+func (a Argument) TypeString() string {
+	out := &strings.Builder{}
+	a.typeString(out)
+	return out.String()
+}
+
+func (a Argument) String() string {
+	if a.Optional {
+		return fmt.Sprintf("<optional arg %s>", a.TypeString())
+	}
+	return fmt.Sprintf("<arg %s>", a.TypeString())
+}
+
+// castAndSet casts val to out's type if needed,
+// then sets out to val.
+func castAndSet(out, val reflect.Value) {
+	outType := out.Type()
+	if outType != val.Type() {
+		val = val.Convert(outType)
+	}
+	out.Set(val)
+}
+
+// makeSliceType makes a reflect.Type for a slice of the given type.
+// Useful for constructing the out value for when AnyType's guess returns a slice.
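+//
+// For instance (hypothetical call), a guessed IntType item yields the
+// reflect.Type for []int:
+//
+//	sliceType, err := makeSliceType(Argument{Type: IntType}) // == reflect.TypeOf([]int(nil))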
+func makeSliceType(itemType Argument) (reflect.Type, error) {
+	var itemReflectedType reflect.Type
+	switch itemType.Type {
+	case IntType:
+		itemReflectedType = reflect.TypeOf(int(0))
+	case StringType:
+		itemReflectedType = reflect.TypeOf("")
+	case BoolType:
+		itemReflectedType = reflect.TypeOf(false)
+	case SliceType:
+		subItemType, err := makeSliceType(*itemType.ItemType)
+		if err != nil {
+			return nil, err
+		}
+		itemReflectedType = subItemType
+	case MapType:
+		subItemType, err := makeMapType(*itemType.ItemType)
+		if err != nil {
+			return nil, err
+		}
+		itemReflectedType = subItemType
+	// TODO(directxman12): support non-uniform slices? (probably not)
+	default:
+		return nil, fmt.Errorf("invalid type when constructing guessed slice out: %v", itemType.Type)
+	}
+
+	if itemType.Pointer {
+		itemReflectedType = reflect.PtrTo(itemReflectedType)
+	}
+
+	return reflect.SliceOf(itemReflectedType), nil
+}
+
+// makeMapType makes a reflect.Type for a map of the given item type.
+// Useful for constructing the out value for when AnyType's guess returns a map.
+func makeMapType(itemType Argument) (reflect.Type, error) {
+	var itemReflectedType reflect.Type
+	switch itemType.Type {
+	case IntType:
+		itemReflectedType = reflect.TypeOf(int(0))
+	case StringType:
+		itemReflectedType = reflect.TypeOf("")
+	case BoolType:
+		itemReflectedType = reflect.TypeOf(false)
+	case SliceType:
+		subItemType, err := makeSliceType(*itemType.ItemType)
+		if err != nil {
+			return nil, err
+		}
+		itemReflectedType = subItemType
+	// TODO(directxman12): support non-uniform slices? (probably not)
+	case MapType:
+		subItemType, err := makeMapType(*itemType.ItemType)
+		if err != nil {
+			return nil, err
+		}
+		itemReflectedType = subItemType
+	case AnyType:
+		// NB(directxman12): maps explicitly allow non-uniform item types, unlike slices at the moment
+		itemReflectedType = interfaceType
+	default:
+		return nil, fmt.Errorf("invalid type when constructing guessed map out: %v", itemType.Type)
+	}
+
+	if itemType.Pointer {
+		itemReflectedType = reflect.PtrTo(itemReflectedType)
+	}
+
+	return reflect.MapOf(reflect.TypeOf(""), itemReflectedType), nil
+}
+
+// guessType takes an educated guess about the type of the next field. If allowSlice
+// is false, it will not guess slices. It's less efficient than parsing with actual
+// type information, since we need to allocate to peek ahead full tokens, and the scanner
+// only allows peeking ahead one character.
+// Maps are *always* non-uniform (i.e. have the AnyType item type), since they're frequently
+// used to represent things like defaults for an object in JSON.
+func guessType(scanner *sc.Scanner, raw string, allowSlice bool) *Argument {
+	if allowSlice {
+		maybeItem := guessType(scanner, raw, false)
+
+		subRaw := raw[scanner.Pos().Offset:]
+		subScanner := parserScanner(subRaw, scanner.Error)
+
+		var tok rune
+		for tok = subScanner.Scan(); tok != ',' && tok != sc.EOF && tok != ';'; tok = subScanner.Scan() {
+			// wait till we get something interesting
+		}
+
+		// semicolon means it's a legacy slice
+		if tok == ';' {
+			return &Argument{
+				Type:     SliceType,
+				ItemType: maybeItem,
+			}
+		}
+
+		return maybeItem
+	}
+
+	// everything else needs a duplicate scanner to scan properly
+	// (so we don't consume our scanner tokens until we actually
+	// go to use this -- Go doesn't like scanners that can be rewound).
+	subRaw := raw[scanner.Pos().Offset:]
+	subScanner := parserScanner(subRaw, scanner.Error)
+
+	// skip whitespace
+	hint := peekNoSpace(subScanner)
+
+	// first, try the easy case -- quoted strings
+	switch hint {
+	case '"', '\'', '`':
+		return &Argument{Type: StringType}
+	}
+
+	// next, check for slices or maps
+	if hint == '{' {
+		subScanner.Scan()
+
+		// TODO(directxman12): this can't guess at empty objects, but that's generally ok.
+		// We'll cross that bridge when we get there.
+
+		// look ahead till we can figure out if this is a map or a slice
+		firstElemType := guessType(subScanner, subRaw, false)
+		if firstElemType.Type == StringType {
+			// might be a map or slice, parse the string and check for colon
+			// (blech, basically arbitrary look-ahead due to raw strings).
+			var keyVal string // just ignore this
+			(&Argument{Type: StringType}).parseString(subScanner, raw, reflect.Indirect(reflect.ValueOf(&keyVal)))
+
+			if subScanner.Scan() == ':' {
+				// it's got a string followed by a colon -- it's a map
+				return &Argument{
+					Type:     MapType,
+					ItemType: &Argument{Type: AnyType},
+				}
+			}
+		}
+
+		// definitely a slice -- maps have to have string keys and have a value followed by a colon
+		return &Argument{
+			Type:     SliceType,
+			ItemType: firstElemType,
+		}
+	}
+
+	// then, bools...
+	probablyString := false
+	if hint == 't' || hint == 'f' {
+		// maybe a bool
+		if nextTok := subScanner.Scan(); nextTok == sc.Ident {
+			switch subScanner.TokenText() {
+			case "true", "false":
+				// definitely a bool
+				return &Argument{Type: BoolType}
+			}
+			// probably a string
+			probablyString = true
+		} else {
+			// we shouldn't ever get here
+			scanner.Error(scanner, fmt.Sprintf("got a token (%q) that looked like an ident, but was not", scanner.TokenText()))
+			return &Argument{Type: InvalidType}
+		}
+	}
+
+	if !probablyString {
+		if nextTok := subScanner.Scan(); nextTok == sc.Int {
+			return &Argument{Type: IntType}
+		}
+	}
+
+	// otherwise assume bare strings
+	return &Argument{Type: StringType}
+}
+
+// parseString parses either of the two accepted string forms (quoted, or bare tokens).
+func (a *Argument) parseString(scanner *sc.Scanner, raw string, out reflect.Value) {
+	// strings are a bit weird -- the "easy" case is quoted strings (tokenized as strings),
+	// the "hard" case (present for backwards compat) is a bare sequence of tokens that aren't
+	// a comma.
+	tok := scanner.Scan()
+	if tok == sc.String || tok == sc.RawString {
+		// the easy case
+		val, err := strconv.Unquote(scanner.TokenText())
+		if err != nil {
+			scanner.Error(scanner, fmt.Sprintf("unable to parse string: %v", err))
+			return
+		}
+		castAndSet(out, reflect.ValueOf(val))
+		return
+	}
+
+	// the "hard" case -- bare tokens not including ',' (the argument
+	// separator), ';' (the slice separator), ':' (the map separator), or '}'
+	// (delimited slice ender)
+	startPos := scanner.Position.Offset
+	for hint := peekNoSpace(scanner); hint != ',' && hint != ';' && hint != ':' && hint != '}' && hint != sc.EOF; hint = peekNoSpace(scanner) {
+		// skip this token
+		scanner.Scan()
+	}
+	endPos := scanner.Position.Offset + len(scanner.TokenText())
+	castAndSet(out, reflect.ValueOf(raw[startPos:endPos]))
+}
+
+// parseSlice parses either of the two slice forms (curly-brace-delimited and semicolon-separated).
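+//
+// The two accepted shapes look like:
+//
+//	{val, val, val}  // preferred, curly-brace-delimited
+//	val;val;val      // legacy, semicolon-separated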
+func (a *Argument) parseSlice(scanner *sc.Scanner, raw string, out reflect.Value) {
+	// slices have two supported formats, like string:
+	// - `{val, val, val}` (preferred)
+	// - `val;val;val` (legacy)
+	resSlice := reflect.Zero(out.Type())
+	elem := reflect.Indirect(reflect.New(out.Type().Elem()))
+
+	// preferred case
+	if peekNoSpace(scanner) == '{' {
+		// NB(directxman12): supporting delimited slices in bare slices
+		// would require an extra look-ahead here :-/
+
+		scanner.Scan() // skip '{'
+		for hint := peekNoSpace(scanner); hint != '}' && hint != sc.EOF; hint = peekNoSpace(scanner) {
+			a.ItemType.parse(scanner, raw, elem, true /* parsing a slice */)
+			resSlice = reflect.Append(resSlice, elem)
+			tok := peekNoSpace(scanner)
+			if tok == '}' {
+				break
+			}
+			if !expect(scanner, ',', "comma") {
+				return
+			}
+		}
+		if !expect(scanner, '}', "close curly brace") {
+			return
+		}
+		castAndSet(out, resSlice)
+		return
+	}
+
+	// legacy case
+	for hint := peekNoSpace(scanner); hint != ',' && hint != '}' && hint != sc.EOF; hint = peekNoSpace(scanner) {
+		a.ItemType.parse(scanner, raw, elem, true /* parsing a slice */)
+		resSlice = reflect.Append(resSlice, elem)
+		tok := peekNoSpace(scanner)
+		if tok == ',' || tok == '}' || tok == sc.EOF {
+			break
+		}
+		scanner.Scan()
+		if tok != ';' {
+			scanner.Error(scanner, fmt.Sprintf("expected semicolon, got %q", scanner.TokenText()))
+			return
+		}
+	}
+	castAndSet(out, resSlice)
+}
+
+// parseMap parses a map of the form {string: val, string: val, string: val}
+func (a *Argument) parseMap(scanner *sc.Scanner, raw string, out reflect.Value) {
+	resMap := reflect.MakeMap(out.Type())
+	elem := reflect.Indirect(reflect.New(out.Type().Elem()))
+	key := reflect.Indirect(reflect.New(out.Type().Key()))
+
+	if !expect(scanner, '{', "open curly brace") {
+		return
+	}
+
+	for hint := peekNoSpace(scanner); hint != '}' && hint != sc.EOF; hint = peekNoSpace(scanner) {
+		a.parseString(scanner, raw, key)
+		if !expect(scanner, ':', "colon") {
+			return
+		}
+		a.ItemType.parse(scanner, raw, elem, false /* not in a slice */)
+		resMap.SetMapIndex(key, elem)
+
+		if peekNoSpace(scanner) == '}' {
+			break
+		}
+		if !expect(scanner, ',', "comma") {
+			return
+		}
+	}
+
+	if !expect(scanner, '}', "close curly brace") {
+		return
+	}
+
+	castAndSet(out, resMap)
+}
+
+// parse functions like Parse, except that it allows passing down whether or not we're
+// already in a slice, to avoid duplicate legacy slice detection for AnyType.
+func (a *Argument) parse(scanner *sc.Scanner, raw string, out reflect.Value, inSlice bool) {
+	// nolint:gocyclo
+	if a.Type == InvalidType {
+		scanner.Error(scanner, "cannot parse invalid type")
+		return
+	}
+	if a.Pointer {
+		out.Set(reflect.New(out.Type().Elem()))
+		out = reflect.Indirect(out)
+	}
+	switch a.Type {
+	case RawType:
+		// raw consumes everything else
+		castAndSet(out, reflect.ValueOf(raw[scanner.Pos().Offset:]))
+		// consume everything else
+		for tok := scanner.Scan(); tok != sc.EOF; tok = scanner.Scan() {
+		}
+	case IntType:
+		if !expect(scanner, sc.Int, "integer") {
+			return
+		}
+		// TODO(directxman12): respect the size when parsing
+		val, err := strconv.Atoi(scanner.TokenText())
+		if err != nil {
+			scanner.Error(scanner, fmt.Sprintf("unable to parse integer: %v", err))
+			return
+		}
+		castAndSet(out, reflect.ValueOf(val))
+	case StringType:
+		// strings are a bit weird -- the "easy" case is quoted strings (tokenized as strings),
+		// the "hard" case (present for backwards compat) is a bare sequence of tokens that aren't
+		// a comma.
+		a.parseString(scanner, raw, out)
+	case BoolType:
+		if !expect(scanner, sc.Ident, "true or false") {
+			return
+		}
+		switch scanner.TokenText() {
+		case "true":
+			castAndSet(out, reflect.ValueOf(true))
+		case "false":
+			castAndSet(out, reflect.ValueOf(false))
+		default:
+			scanner.Error(scanner, fmt.Sprintf("expected true or false, got %q", scanner.TokenText()))
+			return
+		}
+	case AnyType:
+		guessedType := guessType(scanner, raw, !inSlice)
+		newOut := out
+
+		// we need to be able to construct the right element types, below
+		// in parse, so construct a concretely-typed value to use as "out"
+		switch guessedType.Type {
+		case SliceType:
+			newType, err := makeSliceType(*guessedType.ItemType)
+			if err != nil {
+				scanner.Error(scanner, err.Error())
+				return
+			}
+			newOut = reflect.Indirect(reflect.New(newType))
+		case MapType:
+			newType, err := makeMapType(*guessedType.ItemType)
+			if err != nil {
+				scanner.Error(scanner, err.Error())
+				return
+			}
+			newOut = reflect.Indirect(reflect.New(newType))
+		}
+		if !newOut.CanSet() {
+			panic("at the disco") // TODO(directxman12): this is left over from debugging -- it might need to be an error
+		}
+		guessedType.Parse(scanner, raw, newOut)
+		castAndSet(out, newOut)
+	case SliceType:
+		// slices have two supported formats, like string:
+		// - `{val, val, val}` (preferred)
+		// - `val;val;val` (legacy)
+		a.parseSlice(scanner, raw, out)
+	case MapType:
+		// maps are {string: val, string: val, string: val}
+		a.parseMap(scanner, raw, out)
+	}
+}
+
+// Parse attempts to consume the argument from the given scanner (based on the given
+// raw input as well for collecting ranges of content), and places the output value
+// in the given reflect.Value. Errors are reported via the given scanner.
+func (a *Argument) Parse(scanner *sc.Scanner, raw string, out reflect.Value) {
+	a.parse(scanner, raw, out, false)
+}
+
+// ArgumentFromType constructs an Argument by examining the given
+// raw reflect.Type. It can construct arguments from the Go types
+// corresponding to any of the types listed in ArgumentType.
+func ArgumentFromType(rawType reflect.Type) (Argument, error) {
+	if rawType == rawArgsType {
+		return Argument{
+			Type: RawType,
+		}, nil
+	}
+
+	if rawType == interfaceType {
+		return Argument{
+			Type: AnyType,
+		}, nil
+	}
+
+	arg := Argument{}
+	if rawType.Kind() == reflect.Ptr {
+		rawType = rawType.Elem()
+		arg.Pointer = true
+		arg.Optional = true
+	}
+
+	switch rawType.Kind() {
+	case reflect.String:
+		arg.Type = StringType
+	case reflect.Int, reflect.Int32: // NB(directxman12): all ints in kubernetes are int32, so explicitly support that
+		arg.Type = IntType
+	case reflect.Bool:
+		arg.Type = BoolType
+	case reflect.Slice:
+		arg.Type = SliceType
+		itemType, err := ArgumentFromType(rawType.Elem())
+		if err != nil {
+			return Argument{}, fmt.Errorf("bad slice item type: %w", err)
+		}
+		arg.ItemType = &itemType
+	case reflect.Map:
+		arg.Type = MapType
+		if rawType.Key().Kind() != reflect.String {
+			return Argument{}, fmt.Errorf("bad map key type: map keys must be strings")
+		}
+		itemType, err := ArgumentFromType(rawType.Elem())
+		if err != nil {
+			return Argument{}, fmt.Errorf("bad map item type: %w", err)
+		}
+		arg.ItemType = &itemType
+	default:
+		return Argument{}, fmt.Errorf("type has unsupported kind %s", rawType.Kind())
+	}
+
+	return arg, nil
+}
+
+// TargetType describes which kind of node a given marker is associated with.
+type TargetType int
+
+const (
+	// DescribesPackage indicates that a marker is associated with a package.
+	DescribesPackage TargetType = iota
+	// DescribesType indicates that a marker is associated with a type declaration.
+	DescribesType
+	// DescribesField indicates that a marker is associated with a struct field.
+	DescribesField
+)
+
+func (t TargetType) String() string {
+	switch t {
+	case DescribesPackage:
+		return "package"
+	case DescribesType:
+		return "type"
+	case DescribesField:
+		return "field"
+	default:
+		return "(unknown)"
+	}
+}
+
+// Definition is a parsed definition of a marker.
+type Definition struct {
+	// Output is the deserialized Go type of the marker.
+	Output reflect.Type
+	// Name is the marker's name.
+	Name string
+	// Target indicates which kind of node this marker can be associated with.
+	Target TargetType
+	// Fields lists out the types of each field that this marker has, by
+	// argument name as used in the marker (if the output type isn't a struct,
+	// it'll have a single, blank field name). This only lists exported fields
+	// (as per reflection rules).
+	Fields map[string]Argument
+	// FieldNames maps argument names (as used in the marker) to struct field names
+	// in the output type.
+	FieldNames map[string]string
+	// Strict indicates that this definition should error out when parsing if
+	// not all non-optional fields were seen.
+	Strict bool
+}
+
+// AnonymousField indicates that the definition has one field
+// (actually the original object), and thus the field
+// doesn't get named as part of the name.
+func (d *Definition) AnonymousField() bool {
+	if len(d.Fields) != 1 {
+		return false
+	}
+	_, hasAnonField := d.Fields[""]
+	return hasAnonField
+}
+
+// Empty indicates that this definition has no fields.
+func (d *Definition) Empty() bool {
+	return len(d.Fields) == 0
+}
+
+// argumentInfo returns information about an argument field as the marker parser's field loader
+// would see it. This can be useful if you have to interact with marker definition structs
+// externally (e.g. at compile time).
+func argumentInfo(fieldName string, tag reflect.StructTag) (argName string, optionalOpt bool) {
+	argName = lowerCamelCase(fieldName)
+	markerTag, tagSpecified := tag.Lookup("marker")
+	markerTagParts := strings.Split(markerTag, ",")
+	if tagSpecified && markerTagParts[0] != "" {
+		// allow overriding to support legacy cases where we don't follow camelCase conventions
+		argName = markerTagParts[0]
+	}
+	optionalOpt = false
+	for _, tagOption := range markerTagParts[1:] {
+		switch tagOption {
+		case "optional":
+			optionalOpt = true
+		}
+	}
+
+	return argName, optionalOpt
+}
+
+// loadFields uses reflection to populate argument information from the Output type.
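+//
+// A sketch of the mapping (field names here are hypothetical): given
+//
+//	type myMarker struct {
+//		Name  string
+//		Limit *int
+//		Force bool `marker:",optional"`
+//	}
+//
+// loadFields would produce the arguments "name" (required), "limit" (optional,
+// via the pointer), and "force" (optional, via the struct tag).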
+func (d *Definition) loadFields() error {
+	if d.Fields == nil {
+		d.Fields = make(map[string]Argument)
+		d.FieldNames = make(map[string]string)
+	}
+	if d.Output.Kind() != reflect.Struct {
+		// anonymous field type
+		argType, err := ArgumentFromType(d.Output)
+		if err != nil {
+			return err
+		}
+		d.Fields[""] = argType
+		d.FieldNames[""] = ""
+		return nil
+	}
+
+	for i := 0; i < d.Output.NumField(); i++ {
+		field := d.Output.Field(i)
+		if field.PkgPath != "" {
+			// as per the reflect package docs, pkgpath is empty for exported fields,
+			// so a non-empty package path means a private field, which we should skip
+			continue
+		}
+		argName, optionalOpt := argumentInfo(field.Name, field.Tag)
+
+		argType, err := ArgumentFromType(field.Type)
+		if err != nil {
+			return fmt.Errorf("unable to extract type information for field %q: %w", field.Name, err)
+		}
+
+		if argType.Type == RawType {
+			return fmt.Errorf("RawArguments must be the direct type of a marker, and not a field")
+		}
+
+		argType.Optional = optionalOpt || argType.Optional
+
+		d.Fields[argName] = argType
+		d.FieldNames[argName] = field.Name
+	}
+
+	return nil
+}
+
+// parserScanner makes a new scanner appropriate for use in parsing definitions and arguments.
+func parserScanner(raw string, err func(*sc.Scanner, string)) *sc.Scanner {
+	scanner := &sc.Scanner{}
+	scanner.Init(bytes.NewBufferString(raw))
+	scanner.Mode = sc.ScanIdents | sc.ScanInts | sc.ScanStrings | sc.ScanRawStrings | sc.SkipComments
+	scanner.Error = err
+
+	return scanner
+}
+
+// Parse uses the type information in this Definition to parse the given
+// raw marker in the form `+a:b:c=arg,d=arg` into an output object of the
+// type specified in the definition.
+func (d *Definition) Parse(rawMarker string) (interface{}, error) {
+	name, anonName, fields := splitMarker(rawMarker)
+
+	out := reflect.Indirect(reflect.New(d.Output))
+
+	// if we're not a struct or have no arguments, treat the full `a:b:c` as the name,
+	// otherwise, treat `c` as a field name, and `a:b` as the marker name.
+ if !d.AnonymousField() && !d.Empty() && len(anonName) >= len(name)+1 { + fields = anonName[len(name)+1:] + "=" + fields + } + + var errs []error + scanner := parserScanner(fields, func(scanner *sc.Scanner, msg string) { + errs = append(errs, &ScannerError{Msg: msg, Pos: scanner.Position}) + }) + + // TODO(directxman12): strict parsing where we error out if certain fields aren't optional + seen := make(map[string]struct{}, len(d.Fields)) + if d.AnonymousField() && scanner.Peek() != sc.EOF { + // might still be a struct that something fiddled with, so double check + structFieldName := d.FieldNames[""] + outTarget := out + if structFieldName != "" { + // it's a struct field mapped to an anonymous marker + outTarget = out.FieldByName(structFieldName) + if !outTarget.CanSet() { + scanner.Error(scanner, fmt.Sprintf("cannot set field %q (might not exist)", structFieldName)) + return out.Interface(), loader.MaybeErrList(errs) + } + } + + // no need for trying to parse field names if we're not a struct + field := d.Fields[""] + field.Parse(scanner, fields, outTarget) + seen[""] = struct{}{} // mark as seen for strict definitions + } else if !d.Empty() && scanner.Peek() != sc.EOF { + // if we expect *and* actually have arguments passed + for { + // parse the argument name + if !expect(scanner, sc.Ident, "argument name") { + break + } + argName := scanner.TokenText() + if !expect(scanner, '=', "equals") { + break + } + + // make sure we know the field + fieldName, known := d.FieldNames[argName] + if !known { + scanner.Error(scanner, fmt.Sprintf("unknown argument %q", argName)) + break + } + fieldType, known := d.Fields[argName] + if !known { + scanner.Error(scanner, fmt.Sprintf("unknown argument %q", argName)) + break + } + seen[argName] = struct{}{} // mark as seen for strict definitions + + // parse the field value + fieldVal := out.FieldByName(fieldName) + if !fieldVal.CanSet() { + scanner.Error(scanner, fmt.Sprintf("cannot set field %q (might not exist)", fieldName)) + break + } + fieldType.Parse(scanner, fields, fieldVal) + + if len(errs) > 0 { + break + } + + if scanner.Peek() == sc.EOF { + break + } + if !expect(scanner, ',', "comma") { + break + } + } + } + + if tok := scanner.Scan(); tok != sc.EOF { + scanner.Error(scanner, fmt.Sprintf("extra arguments provided: %q", fields[scanner.Position.Offset:])) + } + + if d.Strict { + for argName, arg := range d.Fields { + if _, wasSeen := seen[argName]; !wasSeen && !arg.Optional { + scanner.Error(scanner, fmt.Sprintf("missing argument %q", argName)) + } + } + } + + return out.Interface(), loader.MaybeErrList(errs) +} + +// MakeDefinition constructs a definition from a name, type, and the output type. +// All such definitions are strict by default. If a struct is passed as the output +// type, its public fields will automatically be populated into Fields (and similar +// fields in Definition). Other values will have a single, empty-string-named Fields +// entry. +func MakeDefinition(name string, target TargetType, output interface{}) (*Definition, error) { + def := &Definition{ + Name: name, + Target: target, + Output: reflect.TypeOf(output), + Strict: true, + } + + if err := def.loadFields(); err != nil { + return nil, err + } + + return def, nil +} + +// MakeAnyTypeDefinition constructs a definition for an output struct with a +// field named `Value` of type `interface{}`. The argument to the marker will +// be parsed as AnyType and assigned to the field named `Value`. 
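+//
+// For example (editorial sketch; `anyValue` and the marker name are
+// hypothetical):
+//
+//  type anyValue struct{ Value interface{} }
+//  def := Must(MakeAnyTypeDefinition("mygroup:raw", DescribesPackage, anyValue{}))
+//  // `+mygroup:raw=whatever` then parses into the Value field.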
+func MakeAnyTypeDefinition(name string, target TargetType, output interface{}) (*Definition, error) { + defn, err := MakeDefinition(name, target, output) + if err != nil { + return nil, err + } + defn.FieldNames = map[string]string{"": "Value"} + defn.Fields = map[string]Argument{"": defn.Fields["value"]} + return defn, nil +} + +// splitMarker takes a marker in the form of `+a:b:c=arg,d=arg` and splits it +// into the name (`a:b`), the name if it's not a struct (`a:b:c`), and the parts +// that are definitely fields (`arg,d=arg`). +func splitMarker(raw string) (name string, anonymousName string, restFields string) { + raw = raw[1:] // get rid of the leading '+' + nameFieldParts := strings.SplitN(raw, "=", 2) + if len(nameFieldParts) == 1 { + return nameFieldParts[0], nameFieldParts[0], "" + } + anonymousName = nameFieldParts[0] + name = anonymousName + restFields = nameFieldParts[1] + + nameParts := strings.Split(name, ":") + if len(nameParts) > 1 { + name = strings.Join(nameParts[:len(nameParts)-1], ":") + } + return name, anonymousName, restFields +} + +type ScannerError struct { + Msg string + Pos sc.Position +} + +func (e *ScannerError) Error() string { + return fmt.Sprintf("%s (at %s)", e.Msg, e.Pos) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go new file mode 100644 index 000000000..b0124630f --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/reg.go @@ -0,0 +1,153 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package markers + +import ( + "fmt" + "sync" +) + +// Registry keeps track of registered definitions, and allows for easy lookup. +// It's thread-safe, and the zero-value can be safely used. +type Registry struct { + forPkg map[string]*Definition + forType map[string]*Definition + forField map[string]*Definition + helpFor map[*Definition]*DefinitionHelp + + mu sync.RWMutex + initOnce sync.Once +} + +func (r *Registry) init() { + r.initOnce.Do(func() { + if r.forPkg == nil { + r.forPkg = make(map[string]*Definition) + } + if r.forType == nil { + r.forType = make(map[string]*Definition) + } + if r.forField == nil { + r.forField = make(map[string]*Definition) + } + if r.helpFor == nil { + r.helpFor = make(map[*Definition]*DefinitionHelp) + } + }) +} + +// Define defines a new marker with the given name, target, and output type. +// It's a shortcut around +// r.Register(MakeDefinition(name, target, obj)) +func (r *Registry) Define(name string, target TargetType, obj interface{}) error { + def, err := MakeDefinition(name, target, obj) + if err != nil { + return err + } + return r.Register(def) +} + +// Register registers the given marker definition with this registry for later lookup. 
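+//
+// A typical call (editorial sketch; the marker name is hypothetical):
+//
+//  reg := &Registry{}
+//  def := Must(MakeDefinition("mygroup:enabled", DescribesType, false))
+//  if err := reg.Register(def); err != nil {
+//      // handle the error
+//  }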
+func (r *Registry) Register(def *Definition) error {
+ r.init()
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ switch def.Target {
+ case DescribesPackage:
+ r.forPkg[def.Name] = def
+ case DescribesType:
+ r.forType[def.Name] = def
+ case DescribesField:
+ r.forField[def.Name] = def
+ default:
+ return fmt.Errorf("unknown target type %v", def.Target)
+ }
+ return nil
+}
+
+// AddHelp stores the given help in the registry, marking it as associated with
+// the given definition.
+func (r *Registry) AddHelp(def *Definition, help *DefinitionHelp) {
+ r.init()
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ r.helpFor[def] = help
+}
+
+// Lookup fetches the definition corresponding to the given name and target type.
+func (r *Registry) Lookup(name string, target TargetType) *Definition {
+ r.init()
+
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ switch target {
+ case DescribesPackage:
+ return tryAnonLookup(name, r.forPkg)
+ case DescribesType:
+ return tryAnonLookup(name, r.forType)
+ case DescribesField:
+ return tryAnonLookup(name, r.forField)
+ default:
+ return nil
+ }
+}
+
+// HelpFor fetches the help for a given definition, if present.
+func (r *Registry) HelpFor(def *Definition) *DefinitionHelp {
+ r.init()
+
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ return r.helpFor[def]
+}
+
+// AllDefinitions returns all marker definitions known to this registry.
+func (r *Registry) AllDefinitions() []*Definition {
+ res := make([]*Definition, 0, len(r.forPkg)+len(r.forType)+len(r.forField))
+ for _, def := range r.forPkg {
+ res = append(res, def)
+ }
+ for _, def := range r.forType {
+ res = append(res, def)
+ }
+ for _, def := range r.forField {
+ res = append(res, def)
+ }
+ return res
+}
+
+// tryAnonLookup tries looking up the given marker as both a struct-based
+// marker and an anonymous marker, returning whichever format matches first,
+// preferring the longer (anonymous) name in case of conflicts.
+func tryAnonLookup(name string, defs map[string]*Definition) *Definition {
+ // NB(directxman12): we look up anonymous names first to work with
+ // legacy style marker definitions that have a namespaced approach
+ // (e.g. deepcopy-gen, which uses `+k8s:deepcopy-gen=foo,bar` *and*
+ // `+k8s.io:deepcopy-gen:interfaces=foo`).
+ name, anonName, _ := splitMarker(name)
+ if def, exists := defs[anonName]; exists {
+ return def
+ }
+
+ return defs[name]
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/regutil.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/regutil.go
new file mode 100644
index 000000000..a9160c3c2
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/regutil.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+// Must panics on errors creating definitions.
+func Must(def *Definition, err error) *Definition {
+ if err != nil {
+ panic(err)
+ }
+ return def
+}
+
+// RegisterAll attempts to register all definitions against the given registry,
+// stopping and returning if an error occurs.
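+//
+// For example (editorial sketch; the marker names are hypothetical):
+//
+//  err := RegisterAll(reg,
+//      Must(MakeDefinition("mygroup:flag", DescribesField, false)),
+//      Must(MakeDefinition("mygroup:name", DescribesType, "")),
+//  )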
+func RegisterAll(reg *Registry, defs ...*Definition) error {
+ for _, def := range defs {
+ if err := reg.Register(def); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go b/vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go
new file mode 100644
index 000000000..a9b4c98af
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/markers/zip.go
@@ -0,0 +1,191 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package markers
+
+import (
+ "go/ast"
+ "go/token"
+ "reflect"
+ "strings"
+
+ "sigs.k8s.io/controller-tools/pkg/loader"
+)
+
+// extractDoc extracts documentation from the given node, skipping markers
+// in the godoc and falling back to the decl if necessary (for single-line decls).
+func extractDoc(node ast.Node, decl *ast.GenDecl) string {
+ var docs *ast.CommentGroup
+ switch docced := node.(type) {
+ case *ast.Field:
+ docs = docced.Doc
+ case *ast.File:
+ docs = docced.Doc
+ case *ast.GenDecl:
+ docs = docced.Doc
+ case *ast.TypeSpec:
+ docs = docced.Doc
+ // type Ident expr expressions get docs attached to the decl,
+ // so check for that case (missing Lparen == single line type decl)
+ if docs == nil && decl.Lparen == token.NoPos {
+ docs = decl.Doc
+ }
+ }
+
+ if docs == nil {
+ return ""
+ }
+
+ // filter out markers
+ var outGroup ast.CommentGroup
+ outGroup.List = make([]*ast.Comment, 0, len(docs.List))
+ for _, comment := range docs.List {
+ if isMarkerComment(comment.Text) {
+ continue
+ }
+ outGroup.List = append(outGroup.List, comment)
+ }
+
+ // split lines, and re-join together as a single
+ // paragraph, respecting double-newlines as
+ // paragraph markers.
+ outLines := strings.Split(outGroup.Text(), "\n")
+ if outLines[len(outLines)-1] == "" {
+ // chop off the extraneous last part
+ outLines = outLines[:len(outLines)-1]
+ }
+ // respect double-newline meaning actual newline
+ for i, line := range outLines {
+ if line == "" {
+ outLines[i] = "\n"
+ }
+ }
+ return strings.Join(outLines, " ")
+}
+
+// PackageMarkers collects all the package-level marker values for the given package.
+func PackageMarkers(col *Collector, pkg *loader.Package) (MarkerValues, error) {
+ markers, err := col.MarkersInPackage(pkg)
+ if err != nil {
+ return nil, err
+ }
+ res := make(MarkerValues)
+ for _, file := range pkg.Syntax {
+ fileMarkers := markers[file]
+ for name, vals := range fileMarkers {
+ res[name] = append(res[name], vals...)
+ }
+ }
+
+ return res, nil
+}
+
+// FieldInfo contains marker values and commonly used information for a struct field.
+type FieldInfo struct {
+ // Name is the name of the field (or "" for embedded fields)
+ Name string
+ // Doc is the Godoc of the field, pre-processed to remove markers and join
+ // single newlines together.
+ Doc string
+ // Tag is the struct tag associated with this field (or "" if none existed).
+ Tag reflect.StructTag
+
+ // Markers are all registered markers associated with this field.
+ Markers MarkerValues
+
+ // RawField is the raw, underlying field AST object that this field represents.
+ RawField *ast.Field
+}
+
+// TypeInfo contains marker values and commonly used information for a type declaration.
+type TypeInfo struct {
+ // Name is the name of the type.
+ Name string
+ // Doc is the Godoc of the type, pre-processed to remove markers and join
+ // single newlines together.
+ Doc string
+
+ // Markers are all registered markers associated with the type.
+ Markers MarkerValues
+
+ // Fields are all the fields associated with the type, if it's a struct.
+ // (if not, Fields will be nil).
+ Fields []FieldInfo
+
+ // RawDecl contains the raw GenDecl that the type was declared as part of.
+ RawDecl *ast.GenDecl
+ // RawSpec contains the raw Spec that declared this type.
+ RawSpec *ast.TypeSpec
+ // RawFile contains the file in which this type was declared.
+ RawFile *ast.File
+}
+
+// TypeCallback is a callback called for each type declaration in a package.
+type TypeCallback func(info *TypeInfo)
+
+// EachType collects all markers, then calls the given callback for each type declaration in a package.
+// Each individual spec is considered separate, so
+//
+// type (
+// Foo string
+// Bar int
+// Baz struct{}
+// )
+//
+// yields three calls to the callback.
+func EachType(col *Collector, pkg *loader.Package, cb TypeCallback) error {
+ markers, err := col.MarkersInPackage(pkg)
+ if err != nil {
+ return err
+ }
+
+ loader.EachType(pkg, func(file *ast.File, decl *ast.GenDecl, spec *ast.TypeSpec) {
+ var fields []FieldInfo
+ if structSpec, isStruct := spec.Type.(*ast.StructType); isStruct {
+ for _, field := range structSpec.Fields.List {
+ for _, name := range field.Names {
+ fields = append(fields, FieldInfo{
+ Name: name.Name,
+ Doc: extractDoc(field, nil),
+ Tag: loader.ParseAstTag(field.Tag),
+ Markers: markers[field],
+ RawField: field,
+ })
+ }
+ if field.Names == nil {
+ fields = append(fields, FieldInfo{
+ Doc: extractDoc(field, nil),
+ Tag: loader.ParseAstTag(field.Tag),
+ Markers: markers[field],
+ RawField: field,
+ })
+ }
+ }
+ }
+
+ cb(&TypeInfo{
+ Name: spec.Name.Name,
+ Markers: markers[spec],
+ Doc: extractDoc(spec, decl),
+ Fields: fields,
+ RawDecl: decl,
+ RawSpec: spec,
+ RawFile: file,
+ })
+ })
+
+ return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go
new file mode 100644
index 000000000..8adfb3663
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/parser.go
@@ -0,0 +1,267 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package rbac contains libraries for generating RBAC manifests from RBAC
+// markers in Go source files.
+//
+// The markers take the form:
+//
+//  +kubebuilder:rbac:groups=<groups>,resources=<resources>,resourceNames=<resource names>,verbs=<verbs>,urls=<non resource urls>
+package rbac
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ rbacv1 "k8s.io/api/rbac/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ "sigs.k8s.io/controller-tools/pkg/genall"
+ "sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+var (
+ // RuleDefinition is a marker for defining RBAC rules.
+ // Call ToRule on the value to get a Kubernetes RBAC policy rule.
+ RuleDefinition = markers.Must(markers.MakeDefinition("kubebuilder:rbac", markers.DescribesPackage, Rule{}))
+)
+
+// +controllertools:marker:generateHelp:category=RBAC
+
+// Rule specifies an RBAC rule to allow access to some resources or non-resource URLs.
+type Rule struct {
+ // Groups specifies the API groups that this rule encompasses.
+ Groups []string `marker:",optional"`
+ // Resources specifies the API resources that this rule encompasses.
+ Resources []string `marker:",optional"`
+ // ResourceNames specifies the names of the API resources that this rule encompasses.
+ //
+ // Create requests cannot be restricted by resource name, as the object's name
+ // is not known at authorization time.
+ ResourceNames []string `marker:",optional"`
+ // Verbs specifies the (lowercase) kubernetes API verbs that this rule encompasses.
+ Verbs []string
+ // URLs specifies the non-resource URLs that this rule encompasses.
+ URLs []string `marker:"urls,optional"`
+ // Namespace specifies the scope of the Rule.
+ // If not set, the Rule belongs to the generated ClusterRole.
+ // If set, the Rule belongs to a Role, whose namespace is specified by this field.
+ Namespace string `marker:",optional"`
+}
+
+// ruleKey represents the resources and non-resources a Rule applies to.
+type ruleKey struct {
+ Groups string
+ Resources string
+ ResourceNames string
+ URLs string
+}
+
+func (key ruleKey) String() string {
+ return fmt.Sprintf("%s + %s + %s + %s", key.Groups, key.Resources, key.ResourceNames, key.URLs)
+}
+
+// ruleKeys implements sort.Interface
+type ruleKeys []ruleKey
+
+func (keys ruleKeys) Len() int { return len(keys) }
+func (keys ruleKeys) Swap(i, j int) { keys[i], keys[j] = keys[j], keys[i] }
+func (keys ruleKeys) Less(i, j int) bool { return keys[i].String() < keys[j].String() }
+
+// key normalizes the Rule and returns a ruleKey object.
+func (r *Rule) key() ruleKey {
+ r.normalize()
+ return ruleKey{
+ Groups: strings.Join(r.Groups, "&"),
+ Resources: strings.Join(r.Resources, "&"),
+ ResourceNames: strings.Join(r.ResourceNames, "&"),
+ URLs: strings.Join(r.URLs, "&"),
+ }
+}
+
+// addVerbs adds new verbs into a Rule.
+// The duplicates in `r.Verbs` will be removed, and then `r.Verbs` will be sorted.
+func (r *Rule) addVerbs(verbs []string) {
+ r.Verbs = removeDupAndSort(append(r.Verbs, verbs...))
+}
+
+// normalize removes duplicates from each field of a Rule, and sorts each field.
+func (r *Rule) normalize() {
+ r.Groups = removeDupAndSort(r.Groups)
+ r.Resources = removeDupAndSort(r.Resources)
+ r.ResourceNames = removeDupAndSort(r.ResourceNames)
+ r.Verbs = removeDupAndSort(r.Verbs)
+ r.URLs = removeDupAndSort(r.URLs)
+}
+
+// removeDupAndSort removes duplicates in strs, sorts the items, and returns a
+// new slice of strings.
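+//
+// For example (editorial sketch):
+//
+//  removeDupAndSort([]string{"get", "list", "get"}) // returns []string{"get", "list"}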
+func removeDupAndSort(strs []string) []string {
+ set := make(map[string]bool)
+ for _, str := range strs {
+ if _, ok := set[str]; !ok {
+ set[str] = true
+ }
+ }
+
+ var result []string
+ for str := range set {
+ result = append(result, str)
+ }
+ sort.Strings(result)
+ return result
+}
+
+// ToRule converts this rule to its Kubernetes API form.
+func (r *Rule) ToRule() rbacv1.PolicyRule {
+ // fix the group names first, since letting people type "core" is nice
+ for i, group := range r.Groups {
+ if group == "core" {
+ r.Groups[i] = ""
+ }
+ }
+ return rbacv1.PolicyRule{
+ APIGroups: r.Groups,
+ Verbs: r.Verbs,
+ Resources: r.Resources,
+ ResourceNames: r.ResourceNames,
+ NonResourceURLs: r.URLs,
+ }
+}
+
+// +controllertools:marker:generateHelp
+
+// Generator generates ClusterRole objects.
+type Generator struct {
+ // RoleName sets the name of the generated ClusterRole.
+ RoleName string
+}
+
+func (Generator) RegisterMarkers(into *markers.Registry) error {
+ if err := into.Register(RuleDefinition); err != nil {
+ return err
+ }
+ into.AddHelp(RuleDefinition, Rule{}.Help())
+ return nil
+}
+
+// GenerateRoles generates a slice of objs representing either a ClusterRole or a Role object.
+// The order of the objs in the returned slice is stable and determined by their namespaces.
+func GenerateRoles(ctx *genall.GenerationContext, roleName string) ([]interface{}, error) {
+ rulesByNS := make(map[string][]*Rule)
+ for _, root := range ctx.Roots {
+ markerSet, err := markers.PackageMarkers(ctx.Collector, root)
+ if err != nil {
+ root.AddError(err)
+ }
+
+ // group RBAC markers by namespace
+ for _, markerValue := range markerSet[RuleDefinition.Name] {
+ rule := markerValue.(Rule)
+ namespace := rule.Namespace
+ if _, ok := rulesByNS[namespace]; !ok {
+ rules := make([]*Rule, 0)
+ rulesByNS[namespace] = rules
+ }
+ rulesByNS[namespace] = append(rulesByNS[namespace], &rule)
+ }
+ }
+
+ // NormalizeRules merges Rules with the same ruleKey and sorts the result
+ NormalizeRules := func(rules []*Rule) []rbacv1.PolicyRule {
+ ruleMap := make(map[ruleKey]*Rule)
+ // all the Rules having the same ruleKey will be merged into the first Rule
+ for _, rule := range rules {
+ key := rule.key()
+ if _, ok := ruleMap[key]; !ok {
+ ruleMap[key] = rule
+ continue
+ }
+ ruleMap[key].addVerbs(rule.Verbs)
+ }
+
+ // sort the Rules in rules according to their ruleKeys
+ keys := make([]ruleKey, 0, len(ruleMap))
+ for key := range ruleMap {
+ keys = append(keys, key)
+ }
+ sort.Sort(ruleKeys(keys))
+
+ var policyRules []rbacv1.PolicyRule
+ for _, key := range keys {
+ policyRules = append(policyRules, ruleMap[key].ToRule())
+ }
+ return policyRules
+ }
+
+ // collect all the namespaces and sort them
+ var namespaces []string
+ for ns := range rulesByNS {
+ namespaces = append(namespaces, ns)
+ }
+ sort.Strings(namespaces)
+
+ // process the items in rulesByNS by the order specified in `namespaces` to make sure that the Role order is stable
+ var objs []interface{}
+ for _, ns := range namespaces {
+ rules := rulesByNS[ns]
+ policyRules := NormalizeRules(rules)
+ if len(policyRules) == 0 {
+ continue
+ }
+ if ns == "" {
+ objs = append(objs, rbacv1.ClusterRole{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ClusterRole",
+ APIVersion: rbacv1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: roleName,
+ },
+ Rules: policyRules,
+ })
+ } else {
+ objs = append(objs, rbacv1.Role{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Role",
+ APIVersion: rbacv1.SchemeGroupVersion.String(),
+ },
+ ObjectMeta: 
metav1.ObjectMeta{ + Name: roleName, + Namespace: ns, + }, + Rules: policyRules, + }) + } + } + + return objs, nil +} + +func (g Generator) Generate(ctx *genall.GenerationContext) error { + objs, err := GenerateRoles(ctx, g.RoleName) + if err != nil { + return err + } + + if len(objs) == 0 { + return nil + } + + return ctx.WriteYAML("role.yaml", objs...) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go new file mode 100644 index 000000000..8418dcb1a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/rbac/zz_generated.markerhelp.go @@ -0,0 +1,77 @@ +// +build !ignore_autogenerated + +/* +Copyright2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by helpgen. DO NOT EDIT. + +package rbac + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func (Generator) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "generates ClusterRole objects.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "RoleName": markers.DetailedHelp{ + Summary: "sets the name of the generated ClusterRole.", + Details: "", + }, + }, + } +} + +func (Rule) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "RBAC", + DetailedHelp: markers.DetailedHelp{ + Summary: "specifies an RBAC rule to all access to some resources or non-resource URLs.", + Details: "", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "Groups": markers.DetailedHelp{ + Summary: "specifies the API groups that this rule encompasses.", + Details: "", + }, + "Resources": markers.DetailedHelp{ + Summary: "specifies the API resources that this rule encompasses.", + Details: "", + }, + "ResourceNames": markers.DetailedHelp{ + Summary: "specifies the names of the API resources that this rule encompasses. Create requests cannot be restricted by resourcename, as the object's name is not known at authorization time.", + Details: "", + }, + "Verbs": markers.DetailedHelp{ + Summary: "specifies the (lowercase) kubernetes API verbs that this rule encompasses.", + Details: "", + }, + "URLs": markers.DetailedHelp{ + Summary: "URL specifies the non-resource URLs that this rule encompasses.", + Details: "", + }, + "Namespace": markers.DetailedHelp{ + Summary: "specifies the scope of the Rule. If not set, the Rule belongs to the generated ClusterRole. If set, the Rule belongs to a Role, whose namespace is specified by this field.", + Details: "", + }, + }, + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go new file mode 100644 index 000000000..3b233650c --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/gen.go @@ -0,0 +1,508 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package schemapatcher
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+
+ "gopkg.in/yaml.v3"
+ apiext "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apiextlegacy "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
+ "k8s.io/apimachinery/pkg/api/equality"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ kyaml "sigs.k8s.io/yaml"
+
+ crdgen "sigs.k8s.io/controller-tools/pkg/crd"
+ crdmarkers "sigs.k8s.io/controller-tools/pkg/crd/markers"
+ "sigs.k8s.io/controller-tools/pkg/genall"
+ "sigs.k8s.io/controller-tools/pkg/markers"
+ yamlop "sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml"
+)
+
+// NB(directxman12): this code is quite fragile, but there are a sufficient
+// number of corner cases that it's hard to decompose into separate tools.
+// When in doubt, ping @sttts.
+//
+// Namely:
+// - It needs to only update existing versions
+// - It needs to make "stable" changes that don't mess with map key ordering
+// (in order to facilitate validating that no change has occurred)
+// - It needs to collapse identical schema versions into a top-level schema,
+// if all versions are identical (this is a common requirement to all CRDs,
+// but in this case it means simple jsonpatch wouldn't suffice)
+
+// TODO(directxman12): When CRD v1 rolls around, consider splitting this into a
+// tool that generates a patch, and a separate tool for applying stable YAML
+// patches.
+
+var (
+ legacyAPIExtVersion = apiextlegacy.SchemeGroupVersion.String()
+ currentAPIExtVersion = apiext.SchemeGroupVersion.String()
+)
+
+// +controllertools:marker:generateHelp
+
+// Generator patches existing CRDs with new schemata.
+//
+// For legacy (v1beta1) single-version CRDs, it will simply replace the global schema.
+//
+// For legacy (v1beta1) multi-version CRDs, and any v1 CRDs, it will replace
+// schemata of existing versions and *clear the schema* from any versions not
+// specified in the Go code. It will *not* add new versions, or remove old
+// ones.
+//
+// For legacy multi-version CRDs with identical schemata, it will take care of
+// lifting the per-version schema up to the global schema.
+//
+// It will generate output for each "CRD Version" (API version of the CRD type
+// itself) available, e.g. apiextensions/v1beta1 and apiextensions/v1.
+type Generator struct {
+ // ManifestsPath contains the CustomResourceDefinition YAML files.
+ ManifestsPath string `marker:"manifests"`
+
+ // MaxDescLen specifies the maximum description length for fields in CRD's OpenAPI schema.
+ //
+ // 0 indicates drop the description for all fields completely.
+ // n indicates limit the description to at most n characters and truncate the description to
+ // closest sentence boundary if it exceeds n characters.
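+ // (Editorial note: on the controller-gen command line this field surfaces
+ // as a marker argument following the lowerCamelCase rule from the markers
+ // package, e.g. `schemapatch:manifests=./crds,maxDescLen=0`.)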
+ MaxDescLen *int `marker:",optional"`
+}
+
+var _ genall.Generator = &Generator{}
+
+func (Generator) RegisterMarkers(into *markers.Registry) error {
+ return crdmarkers.Register(into)
+}
+
+func (g Generator) Generate(ctx *genall.GenerationContext) (result error) {
+ parser := &crdgen.Parser{
+ Collector: ctx.Collector,
+ Checker: ctx.Checker,
+ }
+
+ crdgen.AddKnownTypes(parser)
+ for _, root := range ctx.Roots {
+ parser.NeedPackage(root)
+ }
+
+ metav1Pkg := crdgen.FindMetav1(ctx.Roots)
+ if metav1Pkg == nil {
+ // no objects in the roots, since nothing imported metav1
+ return nil
+ }
+
+ // load existing CRD manifests with group-kind and versions
+ partialCRDSets, err := crdsFromDirectory(ctx, g.ManifestsPath)
+ if err != nil {
+ return err
+ }
+
+ // generate schemata for the types we care about, and save them to be written later.
+ for groupKind := range crdgen.FindKubeKinds(parser, metav1Pkg) {
+ existingSet, wanted := partialCRDSets[groupKind]
+ if !wanted {
+ continue
+ }
+
+ for pkg, gv := range parser.GroupVersions {
+ if gv.Group != groupKind.Group {
+ continue
+ }
+ if _, wantedVersion := existingSet.Versions[gv.Version]; !wantedVersion {
+ continue
+ }
+
+ typeIdent := crdgen.TypeIdent{Package: pkg, Name: groupKind.Kind}
+ parser.NeedFlattenedSchemaFor(typeIdent)
+
+ fullSchema := parser.FlattenedSchemata[typeIdent]
+ if g.MaxDescLen != nil {
+ fullSchema = *fullSchema.DeepCopy()
+ crdgen.TruncateDescription(&fullSchema, *g.MaxDescLen)
+ }
+ existingSet.NewSchemata[gv.Version] = fullSchema
+ }
+ }
+
+ // patch existing CRDs with new schemata
+ for _, existingSet := range partialCRDSets {
+ // first, figure out if we need to merge schemata together if they're *all*
+ // identical (meaning we also don't have any "unset" versions)
+
+ if len(existingSet.NewSchemata) == 0 {
+ continue
+ }
+
+ // copy over the new versions that we have, keeping old versions so
+ // that we can tell if a schema would be nil
+ var someVer string
+ for ver := range existingSet.NewSchemata {
+ someVer = ver
+ existingSet.Versions[ver] = struct{}{}
+ }
+
+ allSame := true
+ firstSchema := existingSet.NewSchemata[someVer]
+ for ver := range existingSet.Versions {
+ otherSchema, hasSchema := existingSet.NewSchemata[ver]
+ if !hasSchema || !equality.Semantic.DeepEqual(firstSchema, otherSchema) {
+ allSame = false
+ break
+ }
+ }
+
+ if allSame {
+ if err := existingSet.setGlobalSchema(); err != nil {
+ return fmt.Errorf("failed to set global schema for %s: %w", existingSet.GroupKind, err)
+ }
+ } else {
+ if err := existingSet.setVersionedSchemata(); err != nil {
+ return fmt.Errorf("failed to set versioned schemas for %s: %w", existingSet.GroupKind, err)
+ }
+ }
+ }
+
+ // write the final result out to the new location
+ for _, set := range partialCRDSets {
+ // We assume all CRD versions came from different files, since this
+ // is how controller-gen works. If they came from the same file,
+ // it'd be non-sensical, since you couldn't reasonably use kubectl
+ // with them against older servers.
+ for _, crd := range set.CRDVersions {
+ if err := func() error {
+ outWriter, err := ctx.OutputRule.Open(nil, crd.FileName)
+ if err != nil {
+ return err
+ }
+ defer outWriter.Close()
+
+ enc := yaml.NewEncoder(outWriter)
+ // yaml.v2 defaults to indent=2, yaml.v3 defaults to indent=4,
+ // so be compatible with everything else in k8s and choose 2.
+ enc.SetIndent(2) + + return enc.Encode(crd.Yaml) + }(); err != nil { + return err + } + } + } + + return nil +} + +// partialCRDSet represents a set of CRDs of different apiext versions +// (v1beta1.CRD vs v1.CRD) that represent the same GroupKind. +// +// It tracks modifications to the schemata of those CRDs from this source file, +// plus some useful structured content, and keeps track of the raw YAML representation +// of the different apiext versions. +type partialCRDSet struct { + // GroupKind is the GroupKind represented by this CRD. + GroupKind schema.GroupKind + // NewSchemata are the new schemata generated from Go IDL by controller-gen. + NewSchemata map[string]apiext.JSONSchemaProps + // CRDVersions are the forms of this CRD across different apiextensions + // versions + CRDVersions []*partialCRD + // Versions are the versions of the given GroupKind in this set of CRDs. + Versions map[string]struct{} +} + +// partialCRD represents the raw YAML encoding of a given CRD instance, plus +// the versions contained therein for easy lookup. +type partialCRD struct { + // Yaml is the raw YAML structure of the CRD. + Yaml *yaml.Node + // FileName is the source name of the file that this was read from. + // + // This isn't on partialCRDSet because we could have different CRD versions + // stored in the same file (like controller-tools does by default) or in + // different files. + FileName string + + // CRDVersion is the version of the CRD object itself, from + // apiextensions (currently apiextensions/v1 or apiextensions/v1beta1). + CRDVersion string +} + +// setGlobalSchema sets the global schema for the v1beta1 apiext version in +// this set (if present, as per partialCRD.setGlobalSchema), and sets the +// versioned schemas (as per setVersionedSchemata) for the v1 version. +func (e *partialCRDSet) setGlobalSchema() error { + // there's no easy way to get a "random" key from a go map :-/ + var schema apiext.JSONSchemaProps + for ver := range e.NewSchemata { + schema = e.NewSchemata[ver] + break + } + for _, crdInfo := range e.CRDVersions { + switch crdInfo.CRDVersion { + case legacyAPIExtVersion: + if err := crdInfo.setGlobalSchema(schema); err != nil { + return err + } + case currentAPIExtVersion: + // just set the schemata as normal for non-legacy versions + if err := crdInfo.setVersionedSchemata(e.NewSchemata); err != nil { + return err + } + } + } + return nil +} + +// setGlobalSchema sets the global schema to one of the schemata +// for this CRD. All schemata must be identical for this to be a valid operation. 
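+//
+// (Editorial sketch of the effect: for a v1beta1 CRD manifest this writes
+// spec.validation.openAPIV3Schema and then deletes every
+// spec.versions[i].schema node, as implemented below.)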
+func (e *partialCRD) setGlobalSchema(newSchema apiext.JSONSchemaProps) error {
+ if e.CRDVersion != legacyAPIExtVersion {
+ // no global schema, nothing to do
+ return fmt.Errorf("cannot set global schema on non-legacy CRD versions")
+ }
+ schema, err := legacySchema(newSchema)
+ if err != nil {
+ return fmt.Errorf("failed to convert schema to legacy form: %w", err)
+ }
+ schemaNodeTree, err := yamlop.ToYAML(schema)
+ if err != nil {
+ return err
+ }
+ schemaNodeTree = schemaNodeTree.Content[0] // get rid of the document node
+ yamlop.SetStyle(schemaNodeTree, 0) // clear the style so it defaults to auto-style-choice
+
+ if err := yamlop.SetNode(e.Yaml, *schemaNodeTree, "spec", "validation", "openAPIV3Schema"); err != nil {
+ return err
+ }
+
+ versions, found, err := e.getVersionsNode()
+ if err != nil {
+ return err
+ }
+ if !found {
+ return nil
+ }
+ for i, verNode := range versions.Content {
+ if err := yamlop.DeleteNode(verNode, "schema"); err != nil {
+ return fmt.Errorf("spec.versions[%d]: %w", i, err)
+ }
+ }
+
+ return nil
+}
+
+// getVersionsNode gets the YAML node of the .spec.versions sequence,
+// if present, returning the node and whether or not it was found.
+func (e *partialCRD) getVersionsNode() (*yaml.Node, bool, error) {
+ versions, found, err := yamlop.GetNode(e.Yaml, "spec", "versions")
+ if err != nil {
+ return nil, false, err
+ }
+ if !found {
+ return nil, false, nil
+ }
+ if versions.Kind != yaml.SequenceNode {
+ return nil, true, fmt.Errorf("unexpected non-sequence versions")
+ }
+ return versions, found, nil
+}
+
+// setVersionedSchemata sets the versioned schemata on each encoding in this set as per
+// setVersionedSchemata on partialCRD.
+func (e *partialCRDSet) setVersionedSchemata() error {
+ for _, crdInfo := range e.CRDVersions {
+ if err := crdInfo.setVersionedSchemata(e.NewSchemata); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// setVersionedSchemata populates all existing versions with new schemata,
+// wiping the schema of any version that doesn't have a listed schema.
+// Any "unknown" versions are ignored.
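+//
+// (Editorial sketch: if the manifest lists versions v1alpha1 and v1 but a
+// new schema exists only for v1, this sets schema.openAPIV3Schema on the v1
+// entry and deletes the schema node from the v1alpha1 entry.)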
+func (e *partialCRD) setVersionedSchemata(newSchemata map[string]apiext.JSONSchemaProps) error {
+ var err error
+ if err := yamlop.DeleteNode(e.Yaml, "spec", "validation"); err != nil {
+ return err
+ }
+
+ versions, found, err := e.getVersionsNode()
+ if err != nil {
+ return err
+ }
+ if !found {
+ return fmt.Errorf("unexpected missing versions")
+ }
+
+ for i, verNode := range versions.Content {
+ nameNode, _, _ := yamlop.GetNode(verNode, "name")
+ if nameNode.Kind != yaml.ScalarNode || nameNode.ShortTag() != "!!str" {
+ return fmt.Errorf("version name was not a string at spec.versions[%d]", i)
+ }
+ name := nameNode.Value
+ if name == "" {
+ return fmt.Errorf("unexpected empty name at spec.versions[%d]", i)
+ }
+ newSchema, found := newSchemata[name]
+ if !found {
+ if err := yamlop.DeleteNode(verNode, "schema"); err != nil {
+ return fmt.Errorf("spec.versions[%d]: %w", i, err)
+ }
+ } else {
+ // TODO(directxman12): if this gets to be more than 2 versions, use polymorphism to clean this up
+ var verSchema interface{} = newSchema
+ if e.CRDVersion == legacyAPIExtVersion {
+ verSchema, err = legacySchema(newSchema)
+ if err != nil {
+ return fmt.Errorf("failed to convert schema to legacy form: %w", err)
+ }
+ }
+
+ schemaNodeTree, err := yamlop.ToYAML(verSchema)
+ if err != nil {
+ return fmt.Errorf("failed to convert schema to YAML: %w", err)
+ }
+ schemaNodeTree = schemaNodeTree.Content[0] // get rid of the document node
+ yamlop.SetStyle(schemaNodeTree, 0) // clear the style so it defaults to an auto-chosen one
+ if err := yamlop.SetNode(verNode, *schemaNodeTree, "schema", "openAPIV3Schema"); err != nil {
+ return fmt.Errorf("spec.versions[%d]: %w", i, err)
+ }
+ }
+ }
+ return nil
+}
+
+// crdsFromDirectory loads all CRDs from the given directory in a
+// manner that preserves ordering, comments, etc in order to make patching
+// minimally invasive. Returned CRDs are mapped by group-kind.
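+//
+// (Editorial sketch: a YAML file declaring a CustomResourceDefinition with
+// spec.group operators.coreos.com and spec.names.kind ClusterServiceVersion
+// is keyed as schema.GroupKind{Group: "operators.coreos.com", Kind:
+// "ClusterServiceVersion"}.)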
+func crdsFromDirectory(ctx *genall.GenerationContext, dir string) (map[schema.GroupKind]*partialCRDSet, error) { + res := map[schema.GroupKind]*partialCRDSet{} + dirEntries, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + for _, fileInfo := range dirEntries { + // find all files that are YAML + if fileInfo.IsDir() || filepath.Ext(fileInfo.Name()) != ".yaml" { + continue + } + + rawContent, err := ctx.ReadFile(filepath.Join(dir, fileInfo.Name())) + if err != nil { + return nil, err + } + + // NB(directxman12): we could use the universal deserializer for this, but it's + // really pretty clunky, and the alternative is actually kinda easier to understand + + // ensure that this is a CRD + var typeMeta metav1.TypeMeta + if err := kyaml.Unmarshal(rawContent, &typeMeta); err != nil { + continue + } + if !isSupportedAPIExtGroupVer(typeMeta.APIVersion) || typeMeta.Kind != "CustomResourceDefinition" { + continue + } + + // collect the group-kind and versions from the actual structured form + var actualCRD crdIsh + if err := kyaml.Unmarshal(rawContent, &actualCRD); err != nil { + continue + } + groupKind := schema.GroupKind{Group: actualCRD.Spec.Group, Kind: actualCRD.Spec.Names.Kind} + var versions map[string]struct{} + if len(actualCRD.Spec.Versions) == 0 { + versions = map[string]struct{}{actualCRD.Spec.Version: struct{}{}} + } else { + versions = make(map[string]struct{}, len(actualCRD.Spec.Versions)) + for _, ver := range actualCRD.Spec.Versions { + versions[ver.Name] = struct{}{} + } + } + + // then actually unmarshal in a manner that preserves ordering, etc + var yamlNodeTree yaml.Node + if err := yaml.Unmarshal(rawContent, &yamlNodeTree); err != nil { + continue + } + + // then store this CRDVersion of the CRD in a set, populating the set if necessary + if res[groupKind] == nil { + res[groupKind] = &partialCRDSet{ + GroupKind: groupKind, + NewSchemata: make(map[string]apiext.JSONSchemaProps), + Versions: make(map[string]struct{}), + } + } + for ver := range versions { + res[groupKind].Versions[ver] = struct{}{} + } + res[groupKind].CRDVersions = append(res[groupKind].CRDVersions, &partialCRD{ + Yaml: &yamlNodeTree, + FileName: fileInfo.Name(), + CRDVersion: typeMeta.APIVersion, + }) + } + return res, nil +} + +// isSupportedAPIExtGroupVer checks if the given string-form group-version +// is one of the known apiextensions versions (v1, v1beta1). +func isSupportedAPIExtGroupVer(groupVer string) bool { + return groupVer == currentAPIExtVersion || groupVer == legacyAPIExtVersion +} + +// crdIsh is a merged blob of CRD fields that looks enough like all versions of +// CRD to extract the relevant information for partialCRDSet and partialCRD. +// +// We keep this separate so it's clear what info we need, and so we don't break +// when we switch canonical internal versions and lose old fields while gaining +// new ones (like in v1beta1 --> v1). +// +// Its use is tied directly to crdsFromDirectory, and is mostly an implementation detail of that. +type crdIsh struct { + Spec struct { + Group string `json:"group"` + Names struct { + Kind string `json:"kind"` + } `json:"names"` + Versions []struct { + Name string `json:"name"` + } `json:"versions"` + Version string `json:"version"` + } `json:"spec"` +} + +// legacySchema jumps through some hoops to convert a v1 schema to a v1beta1 schema. 
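+//
+// (Editorial note: it does so by wrapping the schema in a shell v1 CRD,
+// converting the whole CRD to v1beta1 via crdgen.AsVersion, and then
+// extracting spec.validation.openAPIV3Schema, as implemented below.)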
+func legacySchema(origSchema apiext.JSONSchemaProps) (apiextlegacy.JSONSchemaProps, error) { + shellCRD := apiext.CustomResourceDefinition{} + shellCRD.APIVersion = currentAPIExtVersion + shellCRD.Kind = "CustomResourceDefinition" + shellCRD.Spec.Versions = []apiext.CustomResourceDefinitionVersion{ + {Schema: &apiext.CustomResourceValidation{OpenAPIV3Schema: origSchema.DeepCopy()}}, + } + + legacyCRD, err := crdgen.AsVersion(shellCRD, apiextlegacy.SchemeGroupVersion) + if err != nil { + return apiextlegacy.JSONSchemaProps{}, err + } + + return *legacyCRD.(*apiextlegacy.CustomResourceDefinition).Spec.Validation.OpenAPIV3Schema, nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/convert.go b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/convert.go new file mode 100644 index 000000000..b0ac00158 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/convert.go @@ -0,0 +1,61 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + "encoding/json" + "fmt" + + "gopkg.in/yaml.v3" +) + +// ToYAML converts some object that serializes to JSON into a YAML node tree. +// It's useful since it pays attention to JSON tags, unlike yaml.Unmarshal or +// yaml.Node.Decode. +func ToYAML(rawObj interface{}) (*yaml.Node, error) { + if rawObj == nil { + return &yaml.Node{Kind: yaml.ScalarNode, Value: "null", Tag: "!!null"}, nil + } + + rawJSON, err := json.Marshal(rawObj) + if err != nil { + return nil, fmt.Errorf("failed to marshal object: %w", err) + } + + var out yaml.Node + if err := yaml.Unmarshal(rawJSON, &out); err != nil { + return nil, fmt.Errorf("unable to unmarshal marshalled object: %w", err) + } + return &out, nil +} + +// changeAll calls the given callback for all nodes in +// the given YAML node tree. +func changeAll(root *yaml.Node, cb func(*yaml.Node)) { + cb(root) + for _, child := range root.Content { + changeAll(child, cb) + } +} + +// SetStyle sets the style for all nodes in the given +// node tree to the given style. +func SetStyle(root *yaml.Node, style yaml.Style) { + changeAll(root, func(node *yaml.Node) { + node.Style = style + }) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/nested.go b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/nested.go new file mode 100644 index 000000000..70d43dbeb --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/nested.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+ "fmt"
+
+ "gopkg.in/yaml.v3"
+)
+
+// ValueInMapping finds the value node with the corresponding string key
+// in the given mapping node. If the given node is not a mapping, an
+// error will be returned.
+func ValueInMapping(root *yaml.Node, key string) (*yaml.Node, error) {
+ if root.Kind != yaml.MappingNode {
+ return nil, fmt.Errorf("unexpected non-mapping node")
+ }
+
+ for i := 0; i < len(root.Content)/2; i++ {
+ keyNode := root.Content[i*2]
+ if keyNode.Value == key {
+ return root.Content[i*2+1], nil
+ }
+ }
+ return nil, nil
+}
+
+// asCloseAsPossible goes as deep on the given path as possible, returning the
+// last node that existed from the given path in the given tree of mapping
+// nodes, as well as the rest of the path that could not be fetched, if any.
+func asCloseAsPossible(root *yaml.Node, path ...string) (*yaml.Node, []string, error) {
+ if root == nil {
+ return nil, path, nil
+ }
+ if root.Kind == yaml.DocumentNode && len(root.Content) > 0 {
+ root = root.Content[0]
+ }
+
+ currNode := root
+ for ; len(path) > 0; path = path[1:] {
+ if currNode.Kind != yaml.MappingNode {
+ return nil, nil, fmt.Errorf("unexpected non-mapping (%v) before path %v", currNode.Kind, path)
+ }
+
+ nextNode, err := ValueInMapping(currNode, path[0])
+ if err != nil {
+ return nil, nil, fmt.Errorf("unable to get next node in path %v: %w", path, err)
+ }
+
+ if nextNode == nil {
+ // we're as close as possible
+ break
+ }
+
+ currNode = nextNode
+ }
+
+ return currNode, path, nil
+}
+
+// GetNode gets the node at the given path in the given tree of mapping
+// nodes, returning whether or not the node was found.
+func GetNode(root *yaml.Node, path ...string) (*yaml.Node, bool, error) {
+ resNode, restPath, err := asCloseAsPossible(root, path...)
+ if err != nil {
+ return nil, false, err
+ }
+ // more path means the node didn't exist
+ if len(restPath) != 0 {
+ return nil, false, nil
+ }
+ return resNode, true, nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/set.go b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/set.go
new file mode 100644
index 000000000..ede417f1c
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml/set.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+ "fmt"
+
+ "gopkg.in/yaml.v3"
+)
+
+// SetNode sets the given path to the given yaml Node, creating mapping nodes along the way.
+func SetNode(root *yaml.Node, val yaml.Node, path ...string) error {
+ currNode, path, err := asCloseAsPossible(root, path...)
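+ // (Editorial note: currNode is now the deepest existing mapping on the
+ // requested path; any unresolved remainder of the path is created as
+ // nested mappings below.)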
+ if err != nil { + return err + } + + if len(path) > 0 { + if currNode.Kind != yaml.MappingNode { + return fmt.Errorf("unexpected non-mapping before path %v", path) + } + + for ; len(path) > 0; path = path[1:] { + keyNode := yaml.Node{Kind: yaml.ScalarNode, Tag: "!!str", Style: yaml.DoubleQuotedStyle, Value: path[0]} + nextNode := &yaml.Node{Kind: yaml.MappingNode} + currNode.Content = append(currNode.Content, &keyNode, nextNode) + + currNode = nextNode + } + } + + *currNode = val + return nil +} + +// DeleteNode deletes the node at the given path in the given tree of mapping nodes. +// It's a noop if the path doesn't exist. +func DeleteNode(root *yaml.Node, path ...string) error { + if len(path) == 0 { + return fmt.Errorf("must specify a path to delete") + } + pathToParent, keyToDelete := path[:len(path)-1], path[len(path)-1] + parentNode, path, err := asCloseAsPossible(root, pathToParent...) + if err != nil { + return err + } + if len(path) > 0 { + // no-op, parent node doesn't exist + return nil + } + + if parentNode.Kind != yaml.MappingNode { + return fmt.Errorf("unexpected non-mapping node") + } + + for i := 0; i < len(parentNode.Content)/2; i++ { + keyNode := parentNode.Content[i*2] + if keyNode.Value == keyToDelete { + parentNode.Content = append(parentNode.Content[:i*2], parentNode.Content[i*2+2:]...) + return nil + } + } + + // no-op, key not found in parent node + return nil +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/zz_generated.markerhelp.go new file mode 100644 index 000000000..230cb9c06 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/schemapatcher/zz_generated.markerhelp.go @@ -0,0 +1,45 @@ +// +build !ignore_autogenerated + +/* +Copyright2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by helpgen. DO NOT EDIT. + +package schemapatcher + +import ( + "sigs.k8s.io/controller-tools/pkg/markers" +) + +func (Generator) Help() *markers.DefinitionHelp { + return &markers.DefinitionHelp{ + Category: "", + DetailedHelp: markers.DetailedHelp{ + Summary: "patches existing CRDs with new schemata. ", + Details: "For legacy (v1beta1) single-version CRDs, it will simply replace the global schema. \n For legacy (v1beta1) multi-version CRDs, and any v1 CRDs, it will replace schemata of existing versions and *clear the schema* from any versions not specified in the Go code. It will *not* add new versions, or remove old ones. \n For legacy multi-version CRDs with identical schemata, it will take care of lifting the per-version schema up to the global schema. \n It will generate output for each \"CRD Version\" (API version of the CRD type itself) , e.g. 
apiextensions/v1beta1 and apiextensions/v1) available.", + }, + FieldHelp: map[string]markers.DetailedHelp{ + "ManifestsPath": markers.DetailedHelp{ + Summary: "contains the CustomResourceDefinition YAML files.", + Details: "", + }, + "MaxDescLen": markers.DetailedHelp{ + Summary: "specifies the maximum description length for fields in CRD's OpenAPI schema. ", + Details: "0 indicates drop the description for all fields completely. n indicates limit the description to at most n characters and truncate the description to closest sentence boundary if it exceeds n characters.", + }, + }, + } +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/version/version.go b/vendor/sigs.k8s.io/controller-tools/pkg/version/version.go new file mode 100644 index 000000000..245826f69 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/version/version.go @@ -0,0 +1,49 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package version + +import ( + "fmt" + "runtime/debug" +) + +// Version returns the version of the main module +func Version() string { + info, ok := debug.ReadBuildInfo() + if !ok { + // binary has not been built with module support + return "(unknown)" + } + return info.Main.Version +} + +// Print prints the main module version on stdout. +// +// Print will display either: +// +// - "Version: v0.2.1" when the program has been compiled with: +// +// $ go get github.com/controller-tools/cmd/controller-gen@v0.2.1 +// +// Note: go modules requires the usage of semver compatible tags starting with +// 'v' to have nice human-readable versions. +// +// - "Version: (devel)" when the program is compiled from a local git checkout. +// +// - "Version: (unknown)" when not using go modules. +func Print() { + fmt.Printf("Version: %s\n", Version()) +} diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go new file mode 100644 index 000000000..b5d342048 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/parser.go @@ -0,0 +1,288 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package webhook contains libraries for generating webhookconfig manifests +// from markers in Go source files. 
+//
+// The markers take the form:
+//
+//	+kubebuilder:webhook:failurePolicy=<string>,matchPolicy=<string>,groups=<[]string>,resources=<[]string>,verbs=<[]string>,versions=<[]string>,name=<string>,path=<string>,mutating=<bool>
+package webhook
+
+import (
+	"fmt"
+	"strings"
+
+	admissionreg "k8s.io/api/admissionregistration/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"sigs.k8s.io/controller-tools/pkg/genall"
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+var (
+	// ConfigDefinition is a marker for defining Webhook manifests.
+	// Call ToWebhook on the value to get a Kubernetes Webhook.
+	ConfigDefinition = markers.Must(markers.MakeDefinition("kubebuilder:webhook", markers.DescribesPackage, Config{}))
+)
+
+// +controllertools:marker:generateHelp:category=Webhook
+
+// Config specifies how a webhook should be served.
+//
+// It specifies only the details that are intrinsic to the application serving
+// it (e.g. the resources it can handle, or the path it serves on).
type Config struct {
+	// Mutating marks this as a mutating webhook (it's validating only if false)
+	//
+	// Mutating webhooks are allowed to change the object in their response,
+	// and are called *before* all validating webhooks. Mutating webhooks may
+	// choose to reject an object, similarly to a validating webhook.
+	Mutating bool
+	// FailurePolicy specifies what should happen if the API server cannot reach the webhook.
+	//
+	// It may be either "ignore" (to skip the webhook and continue on) or "fail" (to reject
+	// the object in question).
+	FailurePolicy string
+	// MatchPolicy defines how the "rules" list is used to match incoming requests.
+	// Allowed values are "Exact" (match only if it exactly matches the specified rule)
+	// or "Equivalent" (match a request if it modifies a resource listed in rules, even via another API group or version).
+	MatchPolicy string `marker:",optional"`
+
+	// Groups specifies the API groups that this webhook receives requests for.
+	Groups []string
+	// Resources specifies the API resources that this webhook receives requests for.
+	Resources []string
+	// Verbs specifies the Kubernetes API verbs that this webhook receives requests for.
+	//
+	// Only modification-like verbs may be specified.
+	// May be "create", "update", "delete", "connect", or "*" (for all).
+	Verbs []string
+	// Versions specifies the API versions that this webhook receives requests for.
+	Versions []string
+
+	// Name indicates the name of this webhook configuration. Should be a domain with at least three segments separated by dots.
+	Name string
+
+	// Path specifies the path that the API server should connect to this webhook on. Must be
+	// prefixed with a '/validate-' or '/mutate-' depending on the type, and followed by
+	// $GROUP-$VERSION-$KIND where all values are lower-cased and the periods in the group
+	// are substituted for hyphens. For example, a validating webhook path for type
+	// batch.tutorial.kubebuilder.io/v1,Kind=CronJob would be
+	// /validate-batch-tutorial-kubebuilder-io-v1-cronjob
+	Path string
+}
+
+// verbToAPIVariant converts a marker's verb to the proper value for the API.
+// Unrecognized verbs are passed through.
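+//
+// For example (a hypothetical, illustrative call; not part of the upstream docs):
+//
+//	verbToAPIVariant("CREATE") // matches case-insensitively, returns admissionreg.Create
+//	verbToAPIVariant("patch")  // unrecognized, passed through as admissionreg.OperationType("patch")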
+func verbToAPIVariant(verbRaw string) admissionreg.OperationType {
+	switch strings.ToLower(verbRaw) {
+	case strings.ToLower(string(admissionreg.Create)):
+		return admissionreg.Create
+	case strings.ToLower(string(admissionreg.Update)):
+		return admissionreg.Update
+	case strings.ToLower(string(admissionreg.Delete)):
+		return admissionreg.Delete
+	case strings.ToLower(string(admissionreg.Connect)):
+		return admissionreg.Connect
+	case strings.ToLower(string(admissionreg.OperationAll)):
+		return admissionreg.OperationAll
+	default:
+		return admissionreg.OperationType(verbRaw)
+	}
+}
+
+// ToMutatingWebhook converts this rule to its Kubernetes API form.
+func (c Config) ToMutatingWebhook() (admissionreg.MutatingWebhook, error) {
+	if !c.Mutating {
+		return admissionreg.MutatingWebhook{}, fmt.Errorf("%s is a validating webhook", c.Name)
+	}
+
+	matchPolicy, err := c.matchPolicy()
+	if err != nil {
+		return admissionreg.MutatingWebhook{}, err
+	}
+
+	return admissionreg.MutatingWebhook{
+		Name:          c.Name,
+		Rules:         c.rules(),
+		FailurePolicy: c.failurePolicy(),
+		MatchPolicy:   matchPolicy,
+		ClientConfig:  c.clientConfig(),
+	}, nil
+}
+
+// ToValidatingWebhook converts this rule to its Kubernetes API form.
+func (c Config) ToValidatingWebhook() (admissionreg.ValidatingWebhook, error) {
+	if c.Mutating {
+		return admissionreg.ValidatingWebhook{}, fmt.Errorf("%s is a mutating webhook", c.Name)
+	}
+
+	matchPolicy, err := c.matchPolicy()
+	if err != nil {
+		return admissionreg.ValidatingWebhook{}, err
+	}
+
+	return admissionreg.ValidatingWebhook{
+		Name:          c.Name,
+		Rules:         c.rules(),
+		FailurePolicy: c.failurePolicy(),
+		MatchPolicy:   matchPolicy,
+		ClientConfig:  c.clientConfig(),
+	}, nil
+}
+
+// rules returns the configuration of what operations on what
+// resources/subresources a webhook should care about.
+func (c Config) rules() []admissionreg.RuleWithOperations {
+	whConfig := admissionreg.RuleWithOperations{
+		Rule: admissionreg.Rule{
+			APIGroups:   c.Groups,
+			APIVersions: c.Versions,
+			Resources:   c.Resources,
+		},
+		Operations: make([]admissionreg.OperationType, len(c.Verbs)),
+	}
+
+	for i, verbRaw := range c.Verbs {
+		whConfig.Operations[i] = verbToAPIVariant(verbRaw)
+	}
+
+	// fix the group names, since letting people type "core" is nice
+	for i, group := range whConfig.APIGroups {
+		if group == "core" {
+			whConfig.APIGroups[i] = ""
+		}
+	}
+
+	return []admissionreg.RuleWithOperations{whConfig}
+}
+
+// failurePolicy converts the string value to the proper value for the API.
+// Unrecognized values are passed through.
+func (c Config) failurePolicy() *admissionreg.FailurePolicyType {
+	var failurePolicy admissionreg.FailurePolicyType
+	switch strings.ToLower(c.FailurePolicy) {
+	case strings.ToLower(string(admissionreg.Ignore)):
+		failurePolicy = admissionreg.Ignore
+	case strings.ToLower(string(admissionreg.Fail)):
+		failurePolicy = admissionreg.Fail
+	default:
+		failurePolicy = admissionreg.FailurePolicyType(c.FailurePolicy)
+	}
+	return &failurePolicy
+}
+
+// matchPolicy converts the string value to the proper value for the API.
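+//
+// For example (hypothetical): a Config with MatchPolicy "equivalent" yields a
+// pointer to admissionreg.Equivalent, an empty MatchPolicy yields nil (leaving
+// the choice to the API server's default), and any other value is rejected
+// with an error.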
+func (c Config) matchPolicy() (*admissionreg.MatchPolicyType, error) {
+	var matchPolicy admissionreg.MatchPolicyType
+	switch strings.ToLower(c.MatchPolicy) {
+	case strings.ToLower(string(admissionreg.Exact)):
+		matchPolicy = admissionreg.Exact
+	case strings.ToLower(string(admissionreg.Equivalent)):
+		matchPolicy = admissionreg.Equivalent
+	case "":
+		return nil, nil
+	default:
+		return nil, fmt.Errorf("unknown value %q for matchPolicy", c.MatchPolicy)
+	}
+	return &matchPolicy, nil
+}
+
+// clientConfig returns the client config for a webhook.
+func (c Config) clientConfig() admissionreg.WebhookClientConfig {
+	path := c.Path
+	return admissionreg.WebhookClientConfig{
+		Service: &admissionreg.ServiceReference{
+			Name:      "webhook-service",
+			Namespace: "system",
+			Path:      &path,
+		},
+		// OpenAPI marks the field as required before 1.13 because of a bug that got fixed in
+		// https://github.com/kubernetes/api/commit/e7d9121e9ffd63cea0288b36a82bcc87b073bd1b
+		// Put "\n" as a placeholder as a workaround until 1.13+ is almost everywhere.
+		CABundle: []byte("\n"),
+	}
+}
+
+// +controllertools:marker:generateHelp
+
+// Generator generates (partial) {Mutating,Validating}WebhookConfiguration objects.
+type Generator struct{}
+
+func (Generator) RegisterMarkers(into *markers.Registry) error {
+	if err := into.Register(ConfigDefinition); err != nil {
+		return err
+	}
+	into.AddHelp(ConfigDefinition, Config{}.Help())
+	return nil
+}
+
+func (Generator) Generate(ctx *genall.GenerationContext) error {
+	var mutatingCfgs []admissionreg.MutatingWebhook
+	var validatingCfgs []admissionreg.ValidatingWebhook
+	for _, root := range ctx.Roots {
+		markerSet, err := markers.PackageMarkers(ctx.Collector, root)
+		if err != nil {
+			root.AddError(err)
+		}
+
+		for _, cfg := range markerSet[ConfigDefinition.Name] {
+			cfg := cfg.(Config)
+			if cfg.Mutating {
+				w, _ := cfg.ToMutatingWebhook()
+				mutatingCfgs = append(mutatingCfgs, w)
+			} else {
+				w, _ := cfg.ToValidatingWebhook()
+				validatingCfgs = append(validatingCfgs, w)
+			}
+		}
+	}
+
+	var objs []interface{}
+	if len(mutatingCfgs) > 0 {
+		objs = append(objs, &admissionreg.MutatingWebhookConfiguration{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "MutatingWebhookConfiguration",
+				APIVersion: admissionreg.SchemeGroupVersion.String(),
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "mutating-webhook-configuration",
+			},
+			Webhooks: mutatingCfgs,
+		})
+	}
+
+	if len(validatingCfgs) > 0 {
+		objs = append(objs, &admissionreg.ValidatingWebhookConfiguration{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "ValidatingWebhookConfiguration",
+				APIVersion: admissionreg.SchemeGroupVersion.String(),
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "validating-webhook-configuration",
+			},
+			Webhooks: validatingCfgs,
+		})
+	}
+
+	if len(objs) > 0 {
+		return ctx.WriteYAML("manifests.yaml", objs...)
+	}
+
+	return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go
new file mode 100644
index 000000000..3ef1d7cf8
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-tools/pkg/webhook/zz_generated.markerhelp.go
@@ -0,0 +1,84 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by helpgen. DO NOT EDIT.
+
+package webhook
+
+import (
+	"sigs.k8s.io/controller-tools/pkg/markers"
+)
+
+func (Config) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "Webhook",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "specifies how a webhook should be served. ",
+			Details: "It specifies only the details that are intrinsic to the application serving it (e.g. the resources it can handle, or the path it serves on).",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{
+			"Mutating": markers.DetailedHelp{
+				Summary: "marks this as a mutating webhook (it's validating only if false) ",
+				Details: "Mutating webhooks are allowed to change the object in their response, and are called *before* all validating webhooks. Mutating webhooks may choose to reject an object, similarly to a validating webhook.",
+			},
+			"FailurePolicy": markers.DetailedHelp{
+				Summary: "specifies what should happen if the API server cannot reach the webhook. ",
+				Details: "It may be either \"ignore\" (to skip the webhook and continue on) or \"fail\" (to reject the object in question).",
+			},
+			"MatchPolicy": markers.DetailedHelp{
+				Summary: "defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" (match only if it exactly matches the specified rule) or \"Equivalent\" (match a request if it modifies a resource listed in rules, even via another API group or version).",
+				Details: "",
+			},
+			"Groups": markers.DetailedHelp{
+				Summary: "specifies the API groups that this webhook receives requests for.",
+				Details: "",
+			},
+			"Resources": markers.DetailedHelp{
+				Summary: "specifies the API resources that this webhook receives requests for.",
+				Details: "",
+			},
+			"Verbs": markers.DetailedHelp{
+				Summary: "specifies the Kubernetes API verbs that this webhook receives requests for. ",
+				Details: "Only modification-like verbs may be specified. May be \"create\", \"update\", \"delete\", \"connect\", or \"*\" (for all).",
+			},
+			"Versions": markers.DetailedHelp{
+				Summary: "specifies the API versions that this webhook receives requests for.",
+				Details: "",
+			},
+			"Name": markers.DetailedHelp{
+				Summary: "indicates the name of this webhook configuration. Should be a domain with at least three segments separated by dots.",
+				Details: "",
+			},
+			"Path": markers.DetailedHelp{
+				Summary: "specifies the path that the API server should connect to this webhook on. Must be prefixed with a '/validate-' or '/mutate-' depending on the type, and followed by $GROUP-$VERSION-$KIND where all values are lower-cased and the periods in the group are substituted for hyphens. For example, a validating webhook path for type batch.tutorial.kubebuilder.io/v1,Kind=CronJob would be /validate-batch-tutorial-kubebuilder-io-v1-cronjob",
+				Details: "",
+			},
+		},
+	}
+}
+
+func (Generator) Help() *markers.DefinitionHelp {
+	return &markers.DefinitionHelp{
+		Category: "",
+		DetailedHelp: markers.DetailedHelp{
+			Summary: "generates (partial) {Mutating,Validating}WebhookConfiguration objects.",
+			Details: "",
+		},
+		FieldHelp: map[string]markers.DetailedHelp{},
+	}
+}
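// Hypothetical usage sketch (illustrative only, not part of the diff above;
// the group, resource, name, and path values are invented). A project that
// vendors the webhook Generator could declare a validating webhook with a
// package-level marker like this:
//
// +kubebuilder:webhook:failurePolicy=fail,matchPolicy=Equivalent,groups=example.com,resources=widgets,verbs=create;update,versions=v1,name=vwidget.example.com,path=/validate-example-com-v1-widget,mutating=false
package widgets

// Running something like `controller-gen webhook paths=./... output:dir=config/webhook`
// (an assumed invocation) would then have Generate emit a partial
// ValidatingWebhookConfiguration named "validating-webhook-configuration"
// into manifests.yaml, with the rule built from the groups/resources/verbs
// above and the client config pointing at the "webhook-service" Service.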